VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@96789

/* $Id: PGMAllHandler.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * PGM - Page Manager / Monitor, Access Handlers.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/dbgf.h>
#ifdef IN_RING0
# include <VBox/vmm/pdmdev.h>
#endif
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"

#include <VBox/log.h>
#include <iprt/assert.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/string.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/vmm/selm.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Dummy physical access handler type record. */
CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
{
    /* .hType = */          UINT64_C(0x93b7557e1937aaff),
    /* .enmKind = */        PGMPHYSHANDLERKIND_INVALID,
    /* .uState = */         PGM_PAGE_HNDL_PHYS_STATE_ALL,
    /* .fKeepPgmLock = */   true,
    /* .fRing0DevInsIdx = */ false,
#ifdef IN_RING0
    /* .afPadding = */      {false},
    /* .pfnHandler = */     pgmR0HandlerPhysicalHandlerToRing3,
    /* .pfnPfHandler = */   pgmR0HandlerPhysicalPfHandlerToRing3,
#elif defined(IN_RING3)
    /* .fRing0Enabled = */  false,
    /* .pfnHandler = */     pgmR3HandlerPhysicalHandlerInvalid,
#else
# error "unsupported context"
#endif
    /* .pszDesc = */        "dummy"
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                           void *pvBitmap, uint32_t offBitmap);
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);


#ifndef IN_RING3

/**
 * @callback_method_impl{FNPGMPHYSHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                                   PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}


/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                     RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, uErrorCode, pRegFrame, pvFault, GCPhysFault, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}

#endif /* !IN_RING3 */


/**
 * Creates a physical access handler, allocation part.
 *
 * @returns VBox status code.
 * @retval  VERR_OUT_OF_RESOURCES if no more handlers available.
 *
 * @param   pVM             The cross context VM structure.
 * @param   hType           The handler type registration handle.
 * @param   uUser           User argument to the handlers (not pointer).
 * @param   pszDesc         Description of this handler.  If NULL, the type
 *                          description will be used instead.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
                               R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
{
    /*
     * Validate input.
     */
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
    AssertPtr(ppPhysHandler);

    Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));

    /*
     * Allocate and initialize the new entry.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pNew = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.allocateNode();
    if (pNew)
    {
        pNew->Key           = NIL_RTGCPHYS;
        pNew->KeyLast       = NIL_RTGCPHYS;
        pNew->cPages        = 0;
        pNew->cAliasedPages = 0;
        pNew->cTmpOffPages  = 0;
        pNew->uUser         = uUser;
        pNew->hType         = hType;
        pNew->pszDesc       = pszDesc != NIL_RTR3PTR ? pszDesc
#ifdef IN_RING3
                            : pType->pszDesc;
#else
                            : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
#endif

        PGM_UNLOCK(pVM);
        *ppPhysHandler = pNew;
        return VINF_SUCCESS;
    }

    PGM_UNLOCK(pVM);
    return VERR_OUT_OF_RESOURCES;
}


/**
 * Duplicates a physical access handler.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandlerSrc The source handler to duplicate.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
{
    return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
                                      pPhysHandlerSrc->pszDesc, ppPhysHandler);
}


/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED may also be returned.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The physical handler.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 */
int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Validate input.
     */
    AssertReturn(pPhysHandler, VERR_INVALID_POINTER);
    PGMPHYSHANDLERTYPE const      hType = pPhysHandler->hType;
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);

    AssertPtr(pPhysHandler);

    Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
         hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
    AssertReturn(pPhysHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);

    AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
    Assert(GCPhysLast - GCPhys < _4G); /* ASSUMPTION in PGMAllPhys.cpp */

    switch (pType->enmKind)
    {
        case PGMPHYSHANDLERKIND_WRITE:
            break;
        case PGMPHYSHANDLERKIND_MMIO:
        case PGMPHYSHANDLERKIND_ALL:
            /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
            AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
            AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
            break;
        default:
            AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
    if (   !pRam
        || GCPhysLast > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
#endif
        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    }
    Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
    Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);

    /*
     * Try insert into list.
     */
    pPhysHandler->Key     = GCPhys;
    pPhysHandler->KeyLast = GCPhysLast;
    pPhysHandler->cPages  = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

    int rc = PGM_LOCK(pVM);
    if (RT_SUCCESS(rc))
    {
        rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
        if (RT_SUCCESS(rc))
        {
            rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
            if (rc == VINF_PGM_SYNC_CR3)
                rc = VINF_PGM_GCPHYS_ALIASED;

#if defined(IN_RING3) || defined(IN_RING0)
            NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
#endif
            PGM_UNLOCK(pVM);

            if (rc != VINF_SUCCESS)
                Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
            return rc;
        }
        PGM_UNLOCK(pVM);
    }

    pPhysHandler->Key     = NIL_RTGCPHYS;
    pPhysHandler->KeyLast = NIL_RTGCPHYS;

    AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);

#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
                     GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}


/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not all be
 *          updated because the guest page is aliased and/or mapped by multiple
 *          PTs.  A CR3 sync has been flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
 *          one.  A debug assertion is raised.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 * @param   hType           The handler type registration handle.
 * @param   uUser           User argument to the handler.
 * @param   pszDesc         Description of this handler.  If NULL, the type
 *                          description will be used instead.
 */
VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
                                        uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
{
#ifdef LOG_ENABLED
    PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
#endif

    PPGMPHYSHANDLER pNew;
    int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
    if (RT_SUCCESS(rc))
    {
        rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
        if (RT_SUCCESS(rc))
            return rc;
        pgmHandlerPhysicalExDestroy(pVM, pNew);
    }
    return rc;
}

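/*
 * Illustrative usage sketch (not part of the original file): a ring-3 caller
 * typically registers a handler type once during device construction and then
 * covers a page-aligned range with it.  The callback name and the range
 * variables below are assumptions for the sake of the example; only
 * PGMHandlerPhysicalRegister itself is defined above, and the type handle
 * would come from the ring-3 type registration API.
 */
#if 0 /* example only */
/* A write handler that lets PGM carry out the access after noting it. */
static DECLCALLBACK(VBOXSTRICTRC)
myDevDirtyTrackingHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                          PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
    /* ...mark the page dirty in the device state addressed by uUser... */
    return VINF_PGM_HANDLER_DO_DEFAULT; /* let PGM perform the write itself */
}

/* Registration over an inclusive, page-aligned range (hMyType assumed to be a
   PGMPHYSHANDLERKIND_WRITE type registered earlier): */
rc = PGMHandlerPhysicalRegister(pVM, GCPhysFirst, GCPhysFirst + cbRange - 1, hMyType,
                                /*uUser=*/0, "Example dirty tracking");
#endif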

/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @retval  VINF_PGM_SYNC_CR3 when the shadow PTs could not all be updated
 *          because the guest page is aliased and/or mapped by multiple PTs.
 *          FFs set.
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The physical handler.
 * @param   pRam        The RAM range.
 * @param   pvBitmap    Dirty bitmap. Optional.
 * @param   offBitmap   Dirty bitmap offset.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                          void *pvBitmap, uint32_t offBitmap)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool                    fFlushTLBs = false;
    int                     rc         = VINF_SUCCESS;
    PCPGMPHYSHANDLERTYPEINT pCurType   = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    const unsigned          uState     = pCurType->uState;
    uint32_t                cPages     = pCur->cPages;
    uint32_t                i          = (pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
    for (;;)
    {
        PPGMPAGE pPage = &pRam->aPages[i];
        AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
                  ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));

        /* Only do upgrades. */
        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);

            const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
            int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
                                               false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
            if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
                rc = rc2;

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            if (pvBitmap)
                ASMBitSet(pvBitmap, offBitmap);
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
        offBitmap++;
    }

    if (fFlushTLBs)
    {
        PGM_INVL_ALL_VCPU_TLBS(pVM);
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n",
             rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));

    return rc;
}

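/*
 * Illustrative sketch (not in the original source): how a caller that passed a
 * dirty bitmap to the function above could walk it afterwards.  One bit is set
 * per guest page whose handler state was upgraded, starting at bit offBitmap.
 * The variable names are assumptions; sizing the buffer is the caller's job.
 */
#if 0 /* example only */
for (uint32_t iPage = 0; iPage < pCur->cPages; iPage++)
    if (ASMBitTest(pvBitmap, offBitmap + iPage))
    {
        /* The page at pCur->Key + iPage * GUEST_PAGE_SIZE was written since
           the last reset and has just been write-protected again. */
    }
#endif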

/**
 * Deregister a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The handler to deregister (but not free).
 */
int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
{
    LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
             pPhysHandler->Key, pPhysHandler->KeyLast, R3STRING(pPhysHandler->pszDesc)));

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    RTGCPHYS const GCPhys = pPhysHandler->Key;
    AssertReturnStmt(GCPhys != NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_PGM_HANDLER_NOT_FOUND);

    /*
     * Remove the handler from the tree.
     */

    PPGMPHYSHANDLER pRemoved;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
    if (RT_SUCCESS(rc))
    {
        if (pRemoved == pPhysHandler)
        {
            /*
             * Clear the page bits, notify the REM about this change and clear
             * the cache.
             */
            pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
            if (VM_IS_NEM_ENABLED(pVM))
                pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
            pVM->pgm.s.idxLastPhysHandler = 0;

            pPhysHandler->Key     = NIL_RTGCPHYS;
            pPhysHandler->KeyLast = NIL_RTGCPHYS;

            PGM_UNLOCK(pVM);

            return VINF_SUCCESS;
        }

        /*
         * Both of the failure conditions here are considered internal processing
         * errors because they can only be caused by race conditions or corruption.
         * If we ever need to handle concurrent deregistration, we have to move
         * the NIL_RTGCPHYS check inside the PGM lock.
         */
        pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pRemoved);
    }

    PGM_UNLOCK(pVM);

    if (RT_FAILURE(rc))
        AssertMsgFailed(("Didn't find range starting at %RGp in the tree! rc=%Rrc\n", GCPhys, rc));
    else
        AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
                         GCPhys, pRemoved, pPhysHandler));
    return VERR_PGM_HANDLER_IPE_1;
}


/**
 * Destroys (frees) a physical handler.
 *
 * The caller must deregister it before destroying it!
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pHandler    The handler to free.  NULL if ignored.
 */
int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
{
    if (pHandler)
    {
        AssertPtr(pHandler);
        AssertReturn(pHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);

        int rc = PGM_LOCK(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pHandler);
            PGM_UNLOCK(pVM);
        }
        return rc;
    }
    return VINF_SUCCESS;
}

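/*
 * Lifecycle sketch for the Ex API above (illustrative, not from this file):
 * create -> register -> deregister -> destroy.  Error handling is trimmed and
 * the range variables are assumptions for the example.
 */
#if 0 /* example only */
PPGMPHYSHANDLER pHandler;
int rc = pgmHandlerPhysicalExCreate(pVM, hType, /*uUser=*/0, "example", &pHandler);
if (RT_SUCCESS(rc))
{
    rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhysFirst, GCPhysFirst + cbRange - 1);
    /* ...the range is now monitored... */
    if (RT_SUCCESS(rc))
        rc = pgmHandlerPhysicalExDeregister(pVM, pHandler);
    pgmHandlerPhysicalExDestroy(pVM, pHandler); /* requires a deregistered handler; NULL is ignored */
}
#endif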

/**
 * Deregister a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address.
 */
VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
{
    AssertReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);

    /*
     * Find the handler.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pRemoved;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
    if (RT_SUCCESS(rc))
    {
        Assert(pRemoved->Key == GCPhys);
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
                 pRemoved->Key, pRemoved->KeyLast, R3STRING(pRemoved->pszDesc)));

        /*
         * Clear the page bits, notify the REM about this change and clear
         * the cache.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
        pVM->pgm.s.idxLastPhysHandler = 0;

        pRemoved->Key = NIL_RTGCPHYS;
        rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pRemoved);

        PGM_UNLOCK(pVM);
        return rc;
    }

    PGM_UNLOCK(pVM);

    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}


/**
 * Shared code with modify.
 */
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
#ifdef VBOX_WITH_NATIVE_NEM
    PCPGMPHYSHANDLERTYPEINT pCurType    = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    RTGCPHYS                GCPhysStart = pCur->Key;
    RTGCPHYS                GCPhysLast  = pCur->KeyLast;

    /*
     * Page align the range.
     *
     * Since we've reset (recalculated) the physical handler state of all pages
     * we can make use of the page states to figure out whether a page should be
     * included in the REM notification or not.
     */
    if (   (pCur->Key & GUEST_PAGE_OFFSET_MASK)
        || ((pCur->KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
    {
        Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);

        if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
                if (   GCPhys > GCPhysLast
                    || GCPhys < GCPhysStart)
                    return;
                GCPhysStart = GCPhys;
            }
            else
                GCPhysStart &= X86_PTE_PAE_PG_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }

        if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
                if (   GCPhys < GCPhysStart
                    || GCPhys > GCPhysLast)
                    return;
                GCPhysLast = GCPhys;
            }
            else
                GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }
    }

    /*
     * Tell NEM.
     */
    PPGMRAMRANGE const pRam    = pgmPhysGetRange(pVM, GCPhysStart);
    RTGCPHYS const     cb      = GCPhysLast - GCPhysStart + 1;
    uint8_t            u2State = UINT8_MAX;
    NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
                                         pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
    if (u2State != UINT8_MAX && pRam)
        pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
                                   cb >> GUEST_PAGE_SHIFT, u2State);
#else
    RT_NOREF(pVM, pCur);
#endif
}


/**
 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
 * edge pages.
 */
DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Look for other handlers.
     */
    unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
    for (;;)
    {
        PPGMPHYSHANDLER pCur;
        int rc;
        if (fAbove)
            rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                              GCPhys, &pCur);
        else
            rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrBelow(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                              GCPhys, &pCur);
        if (rc == VERR_NOT_FOUND)
            break;
        AssertRCBreak(rc);
        if (((fAbove ? pCur->Key : pCur->KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        uState = RT_MAX(uState, pCurType->uState);

        /* next? */
        RTGCPHYS GCPhysNext = fAbove
                            ? pCur->KeyLast + 1
                            : pCur->Key - 1;
        if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        GCPhys = GCPhysNext;
    }

    /*
     * Update if we found something that is a higher priority state than the current.
     */
    if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
        if (   RT_SUCCESS(rc)
            && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            /* This should normally not be necessary. */
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
            bool fFlushTLBs;
            rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
            if (RT_SUCCESS(rc) && fFlushTLBs)
                PGM_INVL_ALL_VCPU_TLBS(pVM);
            else
                AssertRC(rc);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
        }
        else
            AssertRC(rc);
    }
}


/**
 * Resets an aliased page.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPage           The page.
 * @param   GCPhysPage      The page address in case it comes in handy.
 * @param   pRam            The RAM range the page is associated with (for NEM
 *                          notifications).
 * @param   fDoAccounting   Whether to perform accounting.  (Only set during
 *                          reset where pgmR3PhysRamReset doesn't have the
 *                          handler structure handy.)
 */
void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam, bool fDoAccounting)
{
    Assert(   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
           || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
    Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
#ifdef VBOX_WITH_NATIVE_NEM
    RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
#endif

    /*
     * Flush any shadow page table references *first*.
     */
    bool fFlushTLBs = false;
    int  rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
    AssertLogRelRCReturnVoid(rc);
    HMFlushTlbOnAllVCpus(pVM);

    /*
     * Make it an MMIO/Zero page.
     */
    PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
    PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
    PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);

    /* Flush its TLB entry. */
    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
    /* Not calling IEMTlbInvalidateAllPhysicalAllCpus here as aliased pages are handled like MMIO by the IEM TLB. */

    /*
     * Do accounting for pgmR3PhysRamReset.
     */
    if (fDoAccounting)
    {
        PPGMPHYSHANDLER pHandler;
        rc = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pHandler);
        if (RT_SUCCESS(rc))
        {
            Assert(pHandler->cAliasedPages > 0);
            pHandler->cAliasedPages--;
        }
        else
            AssertMsgFailed(("rc=%Rrc GCPhysPage=%RGp\n", rc, GCPhysPage));
    }

#ifdef VBOX_WITH_NATIVE_NEM
    /*
     * Tell NEM about the protection change.
     */
    if (VM_IS_NEM_ENABLED(pVM))
    {
        uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
        NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                   NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    }
#else
    RT_NOREF(pRam);
#endif
}


/**
 * Resets ram range flags.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pCur    The physical handler.
 *
 * @remark  We don't start messing with the shadow page tables, as we've
 *          already got code in Trap0e which deals with out of sync handler
 *          flags (originally conceived for global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the state.
     */
    RTUINT          cPages   = pCur->cPages;
    RTGCPHYS        GCPhys   = pCur->Key;
    PPGMRAMRANGE    pRamHint = NULL;
    for (;;)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
        if (RT_SUCCESS(rc))
        {
            /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
               (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.)  */
            bool fNemNotifiedAlready = false;
            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
            {
                Assert(pCur->cAliasedPages > 0);
                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/);
                pCur->cAliasedPages--;
                fNemNotifiedAlready = true;
            }
#ifdef VBOX_STRICT
            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
                      ("%RGp %R[pgmpage]\n", GCPhys, pPage));
#endif
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection change. */
            if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            RT_NOREF(fNemNotifiedAlready);
        }
        else
            AssertRC(rc);

        /* next */
        if (--cPages == 0)
            break;
        GCPhys += GUEST_PAGE_SIZE;
    }

    pCur->cAliasedPages = 0;
    pCur->cTmpOffPages  = 0;

    /*
     * Check for partial start and end pages.
     */
    if (pCur->Key & GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->Key - 1, false /* fAbove */, &pRamHint);
    if ((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->KeyLast + 1, true /* fAbove */, &pRamHint);
}


#if 0 /* unused */
/**
 * Modify a physical page access handler.
 *
 * Modification can only be done to the range itself, not the type or anything else.
 *
 * @returns VBox status code.
 *          For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
 *          and a new registration must be performed!
 * @param   pVM             The cross context VM structure.
 * @param   GCPhysCurrent   Current location.
 * @param   GCPhys          New location.
 * @param   GCPhysLast      New last location.
 */
VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Remove it.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
    if (pCur)
    {
        /*
         * Clear the ram flags. (We're gonna move or free it!)
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        /** @todo pCurType validation. */
        bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
                                && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;

        /*
         * Validate the new range, modify and reinsert.
         */
        if (GCPhysLast >= GCPhys)
        {
            /*
             * We require the range to be within registered ram.
             * There is no apparent need to support ranges which cover more than one ram range.
             */
            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
            if (   pRam
                && GCPhys <= pRam->GCPhysLast
                && GCPhysLast >= pRam->GCPhys)
            {
                pCur->Core.Key     = GCPhys;
                pCur->Core.KeyLast = GCPhysLast;
                pCur->cPages       = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;

                if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
                {
                    RTGCPHYS const           cb      = GCPhysLast - GCPhys + 1;
                    PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;

                    /*
                     * Set ram flags, flush shadow PT entries and finally tell REM about this.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);

                    /** @todo NEM: not sure we need this notification... */
                    NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);

                    PGM_UNLOCK(pVM);

                    PGM_INVL_ALL_VCPU_TLBS(pVM);
                    Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
                         GCPhysCurrent, GCPhys, GCPhysLast));
                    return VINF_SUCCESS;
                }

                AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
            }
            else
            {
                AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
            }
        }
        else
        {
            AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
            rc = VERR_INVALID_PARAMETER;
        }

        /*
         * Invalid new location, flush the cache and free it.
         * We've only gotta notify REM and free the memory.
         */
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
        pVM->pgm.s.pLastPhysHandlerR0 = 0;
        pVM->pgm.s.pLastPhysHandlerR3 = 0;
        PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
        MMHyperFree(pVM, pCur);
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}
#endif /* unused */


/**
 * Changes the user callback arguments associated with a physical access handler.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address of the handler.
 * @param   uUser       User argument to the handlers.
 */
VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
{
    /*
     * Find the handler and make the change.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);
        pCur->uUser = uUser;
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}

#if 0 /* unused */

/**
 * Splits a physical access handler in two.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address of the handler.
 * @param   GCPhysSplit The split address.
 */
VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
        {
            /*
             * Create new handler node for the 2nd half.
             */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages   = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages       = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                PGM_UNLOCK(pVM);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("whu?\n"));
            rc = VERR_PGM_PHYS_HANDLER_IPE;
        }
        else
        {
            AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    MMHyperFree(pVM, pNew);
    return rc;
}


/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys1     Start physical address of the first handler.
 * @param   GCPhys2     Start physical address of the second handler.
 */
VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
    if (RT_LIKELY(pCur1))
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
        if (RT_LIKELY(pCur2))
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
            {
                if (RT_LIKELY(pCur1->hType == pCur2->hType))
                {
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (RT_LIKELY(pCur3 == pCur2))
                    {
                        pCur1->Core.KeyLast = pCur2->Core.KeyLast;
                        pCur1->cPages       = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pVM->pgm.s.pLastPhysHandlerR0 = 0;
                        pVM->pgm.s.pLastPhysHandlerR3 = 0;
                        PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
                        MMHyperFree(pVM, pCur2);
                        PGM_UNLOCK(pVM);
                        return VINF_SUCCESS;
                    }

                    Assert(pCur3 == pCur2);
                    rc = VERR_PGM_PHYS_HANDLER_IPE;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    return rc;
}

#endif /* unused */

/**
 * Resets any modifications to individual pages in a physical page access
 * handler region.
 *
 * This is used in conjunction with PGMHandlerPhysicalPageTempOff(),
 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start address of the handler region, i.e. what you
 *                      passed to PGMR3HandlerPhysicalRegister(),
 *                      PGMHandlerPhysicalRegisterEx() or
 *                      PGMHandlerPhysicalModify().
 */
VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
{
    LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);

        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        switch (pCurType->enmKind)
        {
            case PGMPHYSHANDLERKIND_WRITE:
            case PGMPHYSHANDLERKIND_ALL:
            case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
            {
                STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
                PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
                Assert(pRam);
                Assert(pRam->GCPhys     <= pCur->Key);
                Assert(pRam->GCPhysLast >= pCur->KeyLast);

                if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
                {
                    /*
                     * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
                     * This could probably be optimized a bit wrt to flushing, but I'm too lazy
                     * to do that now...
                     */
                    if (pCur->cAliasedPages)
                    {
                        PPGMPAGE pPage      = &pRam->aPages[(pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
                        RTGCPHYS GCPhysPage = pCur->Key;
                        uint32_t cLeft      = pCur->cPages;
                        while (cLeft-- > 0)
                        {
                            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
                            {
                                Assert(pCur->cAliasedPages > 0);
                                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
                                --pCur->cAliasedPages;
#ifndef VBOX_STRICT
                                if (pCur->cAliasedPages == 0)
                                    break;
#endif
                            }
                            Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
                            GCPhysPage += GUEST_PAGE_SIZE;
                            pPage++;
                        }
                        Assert(pCur->cAliasedPages == 0);
                    }
                }
                else if (pCur->cTmpOffPages > 0)
                {
                    /*
                     * Set the flags and flush shadow PT entries.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
                }

                pCur->cAliasedPages = 0;
                pCur->cTmpOffPages  = 0;

                rc = VINF_SUCCESS;
                break;
            }

            /*
             * Invalid.
             */
            default:
                AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
                rc = VERR_PGM_PHYS_HANDLER_IPE;
                break;
        }
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
 * tracking.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start address of the handler region.
 * @param   pvBitmap    Dirty bitmap.  Caller has cleared this already, only
 *                      dirty bits will be set.  Caller also made sure it's big
 *                      enough.
 * @param   offBitmap   Dirty bitmap offset.
 * @remarks Caller must own the PGM critical section.
 */
DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
{
    LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur;
    int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);

        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
        if (   pCurType
            && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
        {
            STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));

            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
            Assert(pRam);
            Assert(pRam->GCPhys     <= pCur->Key);
            Assert(pRam->GCPhysLast >= pCur->KeyLast);

            /*
             * Set the flags and flush shadow PT entries.
             */
            if (pCur->cTmpOffPages > 0)
            {
                rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
                pCur->cTmpOffPages = 0;
            }
            else
                rc = VINF_SUCCESS;
        }
        else
        {
            AssertFailed();
            rc = VERR_WRONG_TYPE;
        }
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    return rc;
}

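/*
 * Illustrative sketch (not from this file): the typical consumer of the bitmap
 * variant above is MMIO2 dirty-page tracking.  The region size and variable
 * names are assumptions; the point is merely that the caller clears the bitmap
 * and the reset above only sets bits, one per dirty guest page.
 */
#if 0 /* example only */
uint64_t bmDirty[_1M / GUEST_PAGE_SIZE / 64]; /* one bit per page of a 1 MiB region */
RT_ZERO(bmDirty);                             /* caller clears; the reset only sets bits */
rc = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, GCPhysRegion, bmDirty, 0 /*offBitmap*/);
#endif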

/**
 * Temporarily turns off the access monitoring of a page within a monitored
 * physical write/all page access handler region.
 *
 * Use this when no further \#PFs are required for that page.  Be aware that
 * a page directory sync might reset the flags, and turn on access monitoring
 * for the page.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start address of the access handler.  This
 *                      must be a fully page aligned range or we risk
 *                      messing up other handlers installed for the
 *                      start and end pages.
 * @param   GCPhysPage  The physical address of the page to turn off
 *                      access monitoring for.
 */
VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Validate the range.
     */
    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);
        if (RT_LIKELY(   GCPhysPage >= pCur->Key
                      && GCPhysPage <= pCur->KeyLast))
        {
            Assert(!(pCur->Key & GUEST_PAGE_OFFSET_MASK));
            Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);

            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertReturnStmt(   pCurType
                             && (   pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
                                 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
                             PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);

            /*
             * Change the page status.
             */
            PPGMPAGE     pPage;
            PPGMRAMRANGE pRam;
            rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
            AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
            if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
            {
                PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
                pCur->cTmpOffPages++;

#ifdef VBOX_WITH_NATIVE_NEM
                /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
                if (VM_IS_NEM_ENABLED(pVM))
                {
                    uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                    PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                    NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                                   pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                    PGM_PAGE_SET_NEM_STATE(pPage, u2State);
                }
#endif
            }
            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }
        PGM_UNLOCK(pVM);
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
        return VERR_INVALID_PARAMETER;
    }
    PGM_UNLOCK(pVM);

    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
        return VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}

1434
1435/**
1436 * Resolves an MMIO2 page.
1437 *
1438 * Caller as taken the PGM lock.
1439 *
1440 * @returns Pointer to the page if valid, NULL otherwise
1441 * @param pVM The cross context VM structure.
1442 * @param pDevIns The device owning it.
1443 * @param hMmio2 The MMIO2 region.
1444 * @param offMmio2Page The offset into the region.
1445 */
static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
{
    /* Only works if the handle is in the handle table! */
    AssertReturn(hMmio2 != 0, NULL);
    hMmio2--;

    /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
    AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
    PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
    AssertReturn(pCur, NULL);
    AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);

    /* Loop thru the sub-ranges till we find the one covering offMmio2. */
    for (;;)
    {
#ifdef IN_RING3
        AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
#else
        AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
#endif

        /* Does it match the offset? */
        if (offMmio2Page < pCur->cbReal)
            return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];

        /* Advance if we can. */
        AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
        offMmio2Page -= pCur->cbReal;
        hMmio2++;
        AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
        pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
        AssertReturn(pCur, NULL);
    }
}


/**
 * Replaces an MMIO page with an MMIO2 page.
 *
 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
 * PGMHandlerPhysicalPageTempOff but for an MMIO page.  Since an MMIO page has no
 * backing, the caller must provide a replacement page.  For various reasons the
 * replacement page must be an MMIO2 page.
 *
 * The caller must do required page table modifications.  You can get away
 * without making any modifications since it's an MMIO page, the cost is an extra
 * \#PF which will then resync the page.
 *
 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
 *
 * The caller may still get handler callbacks even after this call and must be
 * able to deal correctly with such calls.  The reason for these callbacks is
 * either that we're executing in the recompiler (which doesn't know about this
 * arrangement) or that we've been restored from saved state (where we won't
 * save the change).
 *
 * @returns VBox status code.
 * @param   pVM                 The cross context VM structure.
 * @param   GCPhys              The start address of the access handler.  This
 *                              must be a fully page aligned range or we risk
 *                              messing up other handlers installed for the
 *                              start and end pages.
 * @param   GCPhysPage          The physical address of the page to turn off
 *                              access monitoring for and replace with the MMIO2
 *                              page.
 * @param   pDevIns             The device instance owning @a hMmio2.
 * @param   hMmio2              Handle to the MMIO2 region containing the page
 *                              to remap in the MMIO page at @a GCPhys.
 * @param   offMmio2PageRemap   The offset into @a hMmio2 of the MMIO2 page that
 *                              should serve as backing memory.
 *
 * @remark  May cause a page pool flush if used on a page that is already
 *          aliased.
 *
 * @note    This trick does only work reliably if the two pages are never ever
 *          mapped in the same page table.  If they are the page pool code will
 *          be confused should either of them be flushed.  See the special case
 *          of zero page aliasing mentioned in #3170.
 *
 */
1526VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1527 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1528{
1529#ifdef VBOX_WITH_PGM_NEM_MODE
1530 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1531#endif
1532 int rc = PGM_LOCK(pVM);
1533 AssertRCReturn(rc, rc);
1534
1535 /*
1536 * Resolve the MMIO2 reference.
1537 */
1538 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1539 if (RT_LIKELY(pPageRemap))
1540 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1541 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1542 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1543 else
1544 {
1545 PGM_UNLOCK(pVM);
1546 return VERR_OUT_OF_RANGE;
1547 }
1548
1549 /*
1550 * Lookup and validate the range.
1551 */
1552 PPGMPHYSHANDLER pCur;
1553 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1554 if (RT_SUCCESS(rc))
1555 {
1556 Assert(pCur->Key == GCPhys);
1557 if (RT_LIKELY( GCPhysPage >= pCur->Key
1558 && GCPhysPage <= pCur->KeyLast))
1559 {
1560 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1561 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1562 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1563 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1564 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1565
1566 /*
1567 * Validate the page.
1568 */
1569 PPGMPAGE pPage;
1570 PPGMRAMRANGE pRam;
1571 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1572 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1573 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1574 {
1575 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1576 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1577 VERR_PGM_PHYS_NOT_MMIO2);
1578 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1579 {
1580 PGM_UNLOCK(pVM);
1581 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1582 }
1583
1584 /*
1585 * The page is already mapped as some other page, reset it
1586 * to an MMIO/ZERO page before doing the new mapping.
1587 */
1588 Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1589 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
                pCur->cAliasedPages--;
            }
            Assert(PGM_PAGE_IS_ZERO(pPage));

            /*
             * Do the actual remapping here.
             * This page now serves as an alias for the backing memory specified.
             */
            LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
                     GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap));
            PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
            PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
            PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
            pCur->cAliasedPages++;
            Assert(pCur->cAliasedPages <= pCur->cPages);

            /* Flush its TLB entry. */
            pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
            /* Not calling IEMTlbInvalidateAllPhysicalAllCpus here as aliased pages are handled like MMIO by the IEM TLB. */

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the backing and protection change. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
                                           PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                           pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
                                           PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }

        PGM_UNLOCK(pVM);
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
        return VERR_INVALID_PARAMETER;
    }

    PGM_UNLOCK(pVM);
    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
        return VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}
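

/*
 * Hedged usage sketch (illustrative comment only, not part of the build):
 * how a device emulation might use PGMHandlerPhysicalPageAliasMmio2 to point
 * a guest MMIO page at one of its own MMIO2 pages so that later accesses
 * bypass the access handler.  The wrapper function and its argument names
 * are made-up placeholders; hMmio2 is the device's MMIO2 handle, shown as
 * uint64_t here for the sketch.
 *
 * @code
 *      static int devExampleAliasPage(PVMCC pVM, RTGCPHYS GCPhysMmioStart, RTGCPHYS GCPhysPage,
 *                                     uint64_t hMmio2, RTGCPHYS offPageIntoMmio2)
 *      {
 *          // Alias one page of the handler range to the MMIO2 backing page.
 *          int rc = PGMHandlerPhysicalPageAliasMmio2(pVM, GCPhysMmioStart, GCPhysPage, hMmio2, offPageIntoMmio2);
 *          if (rc == VINF_PGM_HANDLER_ALREADY_ALIASED) // same backing page was already in place
 *              rc = VINF_SUCCESS;
 *          // PGMHandlerPhysicalReset(pVM, GCPhysMmioStart) restores the whole range to MMIO later.
 *          return rc;
 *      }
 * @endcode
 */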


/**
 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
 *
 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
 * need to be a known MMIO2 page and that only shadow paging may access the
 * page.  The latter distinction is important because the only use for this
 * feature is for mapping the special APIC access page that VT-x uses to detect
 * APIC MMIO operations; the page is shared between all guest CPUs and is
 * actually never written to, at least at the moment.
 *
 * The caller must do the required page table modifications.  You can get away
 * without making any modifications since it's an MMIO page; the cost is an
 * extra \#PF which will then resync the page.
 *
 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          The start address of the access handler.  This
 *                          must be a fully page aligned range or we risk
 *                          messing up other handlers installed for the
 *                          start and end pages.
 * @param   GCPhysPage      The physical address of the page to turn off
 *                          access monitoring for.
 * @param   HCPhysPageRemap The physical address of the HC page that
 *                          serves as backing memory.
 *
 * @remark  May cause a page pool flush if used on a page that is already
 *          aliased.
 */
VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
{
///    Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
#ifdef VBOX_WITH_PGM_NEM_MODE
    AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
#endif
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Lookup and validate the range.
     */
    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);
        if (RT_LIKELY(   GCPhysPage >= pCur->Key
                      && GCPhysPage <= pCur->KeyLast))
        {
            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
            AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
            AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
            AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
                             PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);

            /*
             * Get and validate the page.
             */
            PPGMPAGE pPage;
            rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
            AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
            if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
            {
                PGM_UNLOCK(pVM);
                AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
                                ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
                                VERR_PGM_PHYS_NOT_MMIO2);
                return VINF_PGM_HANDLER_ALREADY_ALIASED;
            }
            Assert(PGM_PAGE_IS_ZERO(pPage));

            /*
             * Do the actual remapping here.
             * This page now serves as an alias for the backing memory
             * specified as far as shadow paging is concerned.
             */
            LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
                     GCPhysPage, pPage, HCPhysPageRemap));
            PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
            PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
            PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
            pCur->cAliasedPages++;
            Assert(pCur->cAliasedPages <= pCur->cPages);

            /* Flush its TLB entry. */
            pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
            /* Not calling IEMTlbInvalidateAllPhysicalAllCpus here as aliased pages are handled like MMIO by the IEM TLB. */

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the backing and protection change. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
                uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
                                           PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                           pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
                                           PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }
        PGM_UNLOCK(pVM);
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
        return VERR_INVALID_PARAMETER;
    }
    PGM_UNLOCK(pVM);

    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
        return VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}
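

/*
 * Hedged usage sketch (illustrative comment only, not compiled): the pattern
 * the doc comment above describes for the VT-x APIC access page -- alias the
 * guest's APIC MMIO page to a shared host page, then restore MMIO semantics
 * with PGMHandlerPhysicalReset() when done.  GCPhysApicBase and
 * HCPhysApicAccess are made-up placeholder names.
 *
 * @code
 *      // Alias the APIC MMIO page to the host page backing the APIC-access page.
 *      int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicBase, GCPhysApicBase, HCPhysApicAccess);
 *      AssertRC(rc);
 *
 *      // Later, e.g. when the guest relocates the APIC, restore the MMIO page:
 *      rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
 * @endcode
 */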


/**
 * Checks if a physical range is handled.
 *
 * @returns boolean
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 * @remarks Caller must take the PGM lock...
 * @thread  EMT.
 */
VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur;
    int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_STRICT
        Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
        PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        Assert(   pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
               || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
               || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
#endif
        PGM_UNLOCK(pVM);
        return true;
    }
    PGM_UNLOCK(pVM);
    return false;
}
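

/*
 * Hedged usage sketch (illustrative comment only, not compiled): the answer
 * can change as soon as the PGM lock is dropped, so a caller that acts on the
 * result should hold the lock across both the query and the dependent work:
 *
 * @code
 *      PGM_LOCK_VOID(pVM);
 *      if (!PGMHandlerPhysicalIsRegistered(pVM, GCPhys))
 *      {
 *          // Safe to treat GCPhys as unmonitored while the lock is held...
 *      }
 *      PGM_UNLOCK(pVM);
 * @endcode
 */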


/**
 * Checks if it's a disabled all access handler or write access handler at the
 * given address.
 *
 * @returns true if it's an all access handler, false if it's a write access
 *          handler.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The address of the page with a disabled handler.
 *
 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
 */
bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
{
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur;
    int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
    AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), true);

    /* Only whole pages can be disabled. */
    Assert(   pCur->Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
           && pCur->KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));

    PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    Assert(   pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
           || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
           || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
    bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
    PGM_UNLOCK(pVM);
    return fRet;
}
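

/*
 * Hedged usage sketch (illustrative comment only, not compiled): how a caller
 * such as PGMR3PhysTlbGCPhys2Ptr can branch on the result when it encounters
 * a page whose handler state is DISABLED:
 *
 * @code
 *      if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
 *      {
 *          // All-access handler: even reads must still go through emulation.
 *      }
 *      else
 *      {
 *          // Write handler only: reads may be mapped straight to the page.
 *      }
 * @endcode
 */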

#ifdef VBOX_STRICT

/**
 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
 * and its AVL enumerators.
 */
typedef struct PGMAHAFIS
{
    /** The current physical address. */
    RTGCPHYS    GCPhys;
    /** Number of errors. */
    unsigned    cErrors;
    /** Pointer to the VM. */
    PVM         pVM;
} PGMAHAFIS, *PPGMAHAFIS;


/**
 * Asserts that the handlers+guest-page-tables == ramrange-flags and
 * that the physical addresses associated with virtual handlers are correct.
 *
 * @returns Number of mismatches.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
{
    PPGM        pPGM = &pVM->pgm.s;
    PGMAHAFIS   State;
    State.GCPhys = 0;
    State.cErrors = 0;
    State.pVM = pVM;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Check the RAM flags against the handlers.
     */
    PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
    for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
    {
        const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            PGMPAGE const *pPage = &pRam->aPages[iPage];
            if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
            {
                State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);

                /*
                 * Physical first - calculate the state based on the handlers
                 * active on the page, then compare.
                 */
                if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
                {
                    /* the first */
                    PPGMPHYSHANDLER pPhys;
                    int rc = pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, State.GCPhys, &pPhys);
                    if (rc == VERR_NOT_FOUND)
                    {
                        rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                     State.GCPhys, &pPhys);
                        if (RT_SUCCESS(rc))
                        {
                            Assert(pPhys->Key >= State.GCPhys);
                            if (pPhys->Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
                                pPhys = NULL;
                        }
                        else
                            AssertLogRelMsgReturn(rc == VERR_NOT_FOUND, ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
                    }
                    else
                        AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);

                    if (pPhys)
                    {
                        PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
                        unsigned uState = pPhysType->uState;

                        /* more? */
                        while (pPhys->KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
                        {
                            PPGMPHYSHANDLER pPhys2;
                            rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                         pPhys->KeyLast + 1, &pPhys2);
                            if (rc == VERR_NOT_FOUND)
                                break;
                            AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc KeyLast+1=%RGp\n", rc, pPhys->KeyLast + 1), 999);
                            if (pPhys2->Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
                                break;
                            PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
                            uState = RT_MAX(uState, pPhysType2->uState);
                            pPhys = pPhys2;
                        }

                        /* Compare. */
                        if (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
                            && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
                        {
                            AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
                                             State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
                            State.cErrors++;
                        }
                    }
                    else
                    {
                        AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
                        State.cErrors++;
                    }
                }
            }
        } /* foreach page in ram range. */
    } /* foreach ram range. */

    /*
     * Do the reverse check for physical handlers.
     */
    /** @todo */

    return State.cErrors;
}
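

/*
 * Hedged usage sketch (illustrative comment only, not compiled): a strict
 * build's debug path can run the cross-check while owning the PGM lock, as
 * PGM_LOCK_ASSERT_OWNER above requires, and assert that nothing is out of
 * sync:
 *
 * @code
 *      PGM_LOCK_VOID(pVM);
 *      unsigned const cErrors = PGMAssertHandlerAndFlagsInSync(pVM);
 *      PGM_UNLOCK(pVM);
 *      AssertMsg(cErrors == 0, ("%u handler/flag mismatches\n", cErrors));
 * @endcode
 */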

#endif /* VBOX_STRICT */
