VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@ 107179

Last change on this file since 107179 was 107179, checked in by vboxsync, 7 weeks ago

VMM/PGM: Fixed unused param/function warnings on darwin.arm64. jiraref:VBP-1466

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 86.7 KB
1/* $Id: PGMAllHandler.cpp 107179 2024-11-29 11:23:32Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/pgm.h>
36#include <VBox/vmm/iem.h>
37#include <VBox/vmm/iom.h>
38#include <VBox/vmm/mm.h>
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/nem.h>
41#include <VBox/vmm/stam.h>
42#include <VBox/vmm/dbgf.h>
43#ifdef IN_RING0
44# include <VBox/vmm/pdmdev.h>
45#endif
46#include "PGMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include "PGMInline.h"
49
50#include <VBox/log.h>
51#include <iprt/assert.h>
52#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
53# include <iprt/asm-amd64-x86.h>
54#endif
55#include <iprt/string.h>
56#include <VBox/param.h>
57#include <VBox/err.h>
58#include <VBox/vmm/selm.h>
59
60
61/*********************************************************************************************************************************
62* Global Variables *
63*********************************************************************************************************************************/
64/** Dummy physical access handler type record. */
65CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
66{
67 /* .hType = */ UINT64_C(0x93b7557e1937aaff),
68 /* .enmKind = */ PGMPHYSHANDLERKIND_INVALID,
69 /* .uState = */ PGM_PAGE_HNDL_PHYS_STATE_ALL,
70 /* .fKeepPgmLock = */ true,
71 /* .fRing0DevInsIdx = */ false,
72#ifdef IN_RING0
73 /* .fNotInHm = */ false,
74 /* .pfnHandler = */ pgmR0HandlerPhysicalHandlerToRing3,
75 /* .pfnPfHandler = */ pgmR0HandlerPhysicalPfHandlerToRing3,
76#elif defined(IN_RING3)
77 /* .fRing0Enabled = */ false,
78 /* .fNotInHm = */ false,
79 /* .pfnHandler = */ pgmR3HandlerPhysicalHandlerInvalid,
80#else
81# error "unsupported context"
82#endif
83 /* .pszDesc = */ "dummy"
84};
85
86
87/*********************************************************************************************************************************
88* Internal Functions *
89*********************************************************************************************************************************/
90static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
91 void *pvBitmap, uint32_t offBitmap);
92static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
93static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);
94
95
96#ifndef IN_RING3
97
98/**
99 * @callback_method_impl{FNPGMPHYSHANDLER,
100 * Dummy for forcing ring-3 handling of the access.}
101 */
102DECLCALLBACK(VBOXSTRICTRC)
103pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
104 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
105{
106 RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
107 return VINF_EM_RAW_EMULATE_INSTR;
108}
109
110
111/**
112 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
113 * Dummy for forcing ring-3 handling of the access.}
114 */
115DECLCALLBACK(VBOXSTRICTRC)
116pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
117 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
118{
119 RT_NOREF(pVM, pVCpu, uErrorCode, pCtx, pvFault, GCPhysFault, uUser);
120 return VINF_EM_RAW_EMULATE_INSTR;
121}
122
123#endif /* !IN_RING3 */
124
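/*
 * [Editor's illustrative sketch - not part of the original file.]
 * The two ring-0 dummies above show the FNPGMPHYSHANDLER shape. A minimal
 * ring-3 write handler for a hypothetical device might look as follows; the
 * EXAMPLEDEVSTATE structure and its abShadow member are invented for the
 * illustration, everything else follows the signature visible above.
 */
#if 0 /* illustration only */
static DECLCALLBACK(VBOXSTRICTRC)
exampleDevWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                       PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, pvPhys, enmOrigin);
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);                      /* registered as PGMPHYSHANDLERKIND_WRITE */
    EXAMPLEDEVSTATE *pThis = (EXAMPLEDEVSTATE *)(uintptr_t)uUser;      /* uUser carries the device state here */
    memcpy(&pThis->abShadow[GCPhys & GUEST_PAGE_OFFSET_MASK], pvBuf, cbBuf); /* mirror the guest write */
    return VINF_PGM_HANDLER_DO_DEFAULT;                                /* let PGM apply the write to the backing page */
}
#endif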
125
126/**
127 * Worker for pgmHandlerPhysicalExCreate.
128 *
129 * @returns A new physical handler on success or NULL on failure.
130 * @param pVM The cross context VM structure.
131 * @param pType The physical handler type.
132 * @param hType The physical handler type registration handle.
133 * @param uUser User argument to the handlers (not pointer).
134 * @param pszDesc Description of this handler. If NULL, the type description
135 * will be used instead.
136 */
137DECL_FORCE_INLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalExCreateWorker(PVMCC pVM, PCPGMPHYSHANDLERTYPEINT pType,
138 PGMPHYSHANDLERTYPE hType, uint64_t uUser,
139 R3PTRTYPE(const char *) pszDesc)
140{
141 PGM_LOCK_ASSERT_OWNER(pVM);
142 PPGMPHYSHANDLER pNew = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.allocateNode();
143 if (pNew)
144 {
145 pNew->Key = NIL_RTGCPHYS;
146 pNew->KeyLast = NIL_RTGCPHYS;
147 pNew->cPages = 0;
148 pNew->cAliasedPages = 0;
149 pNew->cTmpOffPages = 0;
150 pNew->uUser = uUser;
151 pNew->hType = hType;
152 pNew->pszDesc = pszDesc != NIL_RTR3PTR ? pszDesc
153#ifdef IN_RING3
154 : pType->pszDesc;
155#else
156 : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
157 NOREF(pType);
158#endif
159 }
160 return pNew;
161}
162
163
164/**
165 * Creates a physical access handler, allocation part.
166 *
167 * @returns VBox status code.
168 * @retval VERR_OUT_OF_RESOURCES if no more handlers available.
169 *
170 * @param pVM The cross context VM structure.
171 * @param hType The handler type registration handle.
172 * @param uUser User argument to the handlers (not pointer).
173 * @param pszDesc Description of this handler. If NULL, the type
174 * description will be used instead.
175 * @param ppPhysHandler Where to return the access handler structure on
176 * success.
177 */
178int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
179 R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
180{
181 /*
182 * Validate input.
183 */
184 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
185 AssertReturn(pType, VERR_INVALID_HANDLE);
186 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
187 AssertPtr(ppPhysHandler);
188
189 Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
190 uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));
191
192 /*
193 * Allocate and initialize the new entry.
194 */
195 int rc = PGM_LOCK(pVM);
196 AssertRCReturn(rc, rc);
197 *ppPhysHandler = pgmHandlerPhysicalExCreateWorker(pVM, pType, hType, uUser, pszDesc);
198 PGM_UNLOCK(pVM);
199 if (*ppPhysHandler)
200 return VINF_SUCCESS;
201 return VERR_OUT_OF_RESOURCES;
202}
203
204
205/**
206 * Duplicates a physical access handler.
207 *
208 * @returns VBox status code.
209 * @retval VINF_SUCCESS when successfully installed.
210 *
211 * @param pVM The cross context VM structure.
212 * @param pPhysHandlerSrc The source handler to duplicate
213 * @param ppPhysHandler Where to return the access handler structure on
214 * success.
215 */
216int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
217{
218 return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
219 pPhysHandlerSrc->pszDesc, ppPhysHandler);
220}
221
222
223/**
224 * Register an access handler for a physical range.
225 *
226 * @returns VBox status code.
227 * @retval VINF_SUCCESS when successfully installed.
228 * @retval VINF_PGM_GCPHYS_ALIASED may also be returned.
229 *
230 * @param pVM The cross context VM structure.
231 * @param pPhysHandler The physical handler.
232 * @param GCPhys Start physical address.
233 * @param GCPhysLast Last physical address. (inclusive)
234 */
235int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
236{
237 /*
238 * Validate input.
239 */
240 AssertReturn(pPhysHandler, VERR_INVALID_POINTER);
241 PGMPHYSHANDLERTYPE const hType = pPhysHandler->hType;
242 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
243 AssertReturn(pType, VERR_INVALID_HANDLE);
244 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
245
246 AssertPtr(pPhysHandler);
247
248 Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
249 hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
250 AssertReturn(pPhysHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
251
252 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
253 Assert(GCPhysLast - GCPhys < _4G); /* ASSUMPTION in PGMAllPhys.cpp */
254
255 switch (pType->enmKind)
256 {
257 case PGMPHYSHANDLERKIND_WRITE:
258 if (!pType->fNotInHm)
259 break;
260 RT_FALL_THRU(); /* Simplification: fNotInHm can only be used with full pages */
261 case PGMPHYSHANDLERKIND_MMIO:
262 case PGMPHYSHANDLERKIND_ALL:
263 /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
264 AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
265 AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
266 break;
267 default:
268 AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
269 return VERR_INVALID_PARAMETER;
270 }
271
272 /*
273 * We require the range to be within registered ram.
274 * There is no apparent need to support ranges which cover more than one ram range.
275 */
276 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
277 if ( !pRam
278 || GCPhysLast > pRam->GCPhysLast)
279 {
280#ifdef IN_RING3
281 DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
282#endif
283 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
284 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
285 }
286 Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
287 Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);
288
289 /*
290 * Try insert into list.
291 */
292 pPhysHandler->Key = GCPhys;
293 pPhysHandler->KeyLast = GCPhysLast;
294 pPhysHandler->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
295
296 int rc = PGM_LOCK(pVM);
297 if (RT_SUCCESS(rc))
298 {
299 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
300 if (RT_SUCCESS(rc))
301 {
302 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
303 if (rc == VINF_PGM_SYNC_CR3)
304 rc = VINF_PGM_GCPHYS_ALIASED;
305
306#if defined(IN_RING3) || defined(IN_RING0)
307 NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
308#endif
309 PGM_UNLOCK(pVM);
310
311 if (rc != VINF_SUCCESS)
312 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
313 return rc;
314 }
315 PGM_UNLOCK(pVM);
316 }
317
318 pPhysHandler->Key = NIL_RTGCPHYS;
319 pPhysHandler->KeyLast = NIL_RTGCPHYS;
320
321 AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);
322
323#if defined(IN_RING3) && defined(VBOX_STRICT)
324 DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
325#endif
326 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
327 GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
328 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
329}
330
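/*
 * [Editor's illustrative note - not part of the original file.]
 * Worked example for the cPages computation above: partially covered edge
 * pages count as whole pages. With 4 KiB guest pages, a WRITE handler with
 * GCPhys=0xa080 and GCPhysLast=0xb07f gives
 *     cPages = (0xb07f - 0xa000 + 0x1000) >> 12 = 0x207f >> 12 = 2
 * i.e. both the 0xa000 and the 0xb000 page are tracked.
 */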
331
332/**
333 * Worker for pgmHandlerPhysicalRegisterVmxApicAccessPage.
334 *
335 * @returns VBox status code.
336 * @retval VINF_SUCCESS when successfully installed.
337 * @retval VINF_PGM_GCPHYS_ALIASED may also be returned.
338 *
339 * @param pVM The cross context VM structure.
340 * @param pPhysHandler The physical handler.
341 * @param GCPhys The address of the virtual VMX APIC-access page. Must be
342 * page aligned.
343 */
344static int pgmHandlerPhysicalRegisterVmxApicAccessPage(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys)
345{
346 PGM_LOCK_ASSERT_OWNER(pVM);
347 LogFunc(("GCPhys=%RGp\n", GCPhys));
348
349 /*
350 * We require the range to be within registered ram.
351 * There is no apparent need to support ranges which cover more than one ram range.
352 */
353 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
354 RTGCPHYS const GCPhysLast = GCPhys | X86_PAGE_4K_OFFSET_MASK;
355 if ( !pRam
356 || GCPhysLast > pRam->GCPhysLast)
357 {
358#ifdef IN_RING3
359 DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
360#endif
361 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
362 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
363 }
364 Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
365 Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);
366
367 /*
368 * Try insert into list.
369 */
370 pPhysHandler->Key = GCPhys;
371 pPhysHandler->KeyLast = GCPhysLast;
372 pPhysHandler->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
373
374 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
375 if (RT_SUCCESS(rc))
376 {
377 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
378 if (rc == VINF_PGM_SYNC_CR3)
379 rc = VINF_PGM_GCPHYS_ALIASED;
380
381#if defined(IN_RING3) || defined(IN_RING0)
382 NEMHCNotifyHandlerPhysicalRegister(pVM, PGMPHYSHANDLERKIND_ALL, GCPhys, GCPhysLast - GCPhys + 1);
383#endif
384 return rc;
385 }
386
387 pPhysHandler->Key = NIL_RTGCPHYS;
388 pPhysHandler->KeyLast = NIL_RTGCPHYS;
389
390 AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);
391#if defined(IN_RING3) && defined(VBOX_STRICT)
392 DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
393#endif
394 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
395 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
396}
397
398
399/**
400 * Register an access handler for a physical range.
401 *
402 * @returns VBox status code.
403 * @retval VINF_SUCCESS when successfully installed.
404 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be fully updated because
405 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
406 * flagged together with a pool clearing.
407 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
408 * one. A debug assertion is raised.
409 *
410 * @param pVM The cross context VM structure.
411 * @param GCPhys Start physical address.
412 * @param GCPhysLast Last physical address. (inclusive)
413 * @param hType The handler type registration handle.
414 * @param uUser User argument to the handler.
415 * @param pszDesc Description of this handler. If NULL, the type
416 * description will be used instead.
417 */
418VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
419 uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
420{
421#ifdef LOG_ENABLED
422 PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
423 Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
424 GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
425#endif
426
427 PPGMPHYSHANDLER pNew;
428 int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
429 if (RT_SUCCESS(rc))
430 {
431 rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
432 if (RT_SUCCESS(rc))
433 return rc;
434 pgmHandlerPhysicalExDestroy(pVM, pNew);
435 }
436 return rc;
437}
438
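/*
 * [Editor's illustrative sketch - not part of the original file.]
 * Typical ring-3 usage of the registration API above, assuming a handler type
 * handle obtained earlier (e.g. via PGMR3HandlerPhysicalTypeRegister) and a
 * page-aligned region; GCPhysRegion, cbRegion, hHandlerType and pThisDevice
 * are placeholders for the illustration.
 */
#if 0 /* illustration only */
    int rc = PGMHandlerPhysicalRegister(pVM, GCPhysRegion, GCPhysRegion + cbRegion - 1 /* inclusive last byte */,
                                        hHandlerType, (uint64_t)(uintptr_t)pThisDevice, "Example device range");
    AssertRCReturn(rc, rc);
    /* ... the region is now monitored; tear it down again using the start address as the key ... */
    rc = PGMHandlerPhysicalDeregister(pVM, GCPhysRegion);
    AssertRC(rc);
#endif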
439
440/**
441 * Register an access handler for a virtual VMX APIC-access page.
442 *
443 * This holds the PGM lock across the whole operation to resolve races between
444 * VCPUs registering the same page simultaneously. It's also a slightly slimmer
445 * version of the regular registration function as it's specific to the VMX
446 * APIC-access page.
447 *
448 * @returns VBox status code.
449 * @retval VINF_SUCCESS when successfully installed.
450 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be fully updated because
451 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
452 * flagged together with a pool clearing.
453 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
454 * one. A debug assertion is raised.
455 *
456 * @param pVM The cross context VM structure.
457 * @param GCPhys The address of the VMX virtual-APIC access page. Must be
458 * page aligned.
459 * @param hType The handler type registration handle.
460 */
461VMMDECL(int) PGMHandlerPhysicalRegisterVmxApicAccessPage(PVMCC pVM, RTGCPHYS GCPhys, PGMPHYSHANDLERTYPE hType)
462{
463 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
464 AssertReturn(pType, VERR_INVALID_HANDLE);
465 AssertMsg(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
466
467 /*
468 * Find if the VMX APIC access page has already been registered at this address.
469 */
470 int rc = PGM_LOCK(pVM);
471 AssertRCReturn(rc, rc);
472
473 PPGMPHYSHANDLER pHandler;
474 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pHandler);
475 if (RT_SUCCESS(rc))
476 {
477 PCPGMPHYSHANDLERTYPEINT const pHandlerType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pHandler);
478 Assert(GCPhys >= pHandler->Key && GCPhys <= pHandler->KeyLast);
479 Assert( pHandlerType->enmKind == PGMPHYSHANDLERKIND_WRITE
480 || pHandlerType->enmKind == PGMPHYSHANDLERKIND_ALL
481 || pHandlerType->enmKind == PGMPHYSHANDLERKIND_MMIO);
482
483 /* Check it's the virtual VMX APIC-access page. */
484 if (pHandlerType->fNotInHm)
485 {
486 Assert(pHandlerType->enmKind == PGMPHYSHANDLERKIND_ALL);
487 rc = VINF_SUCCESS;
488 }
489 else
490 {
491 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
492 AssertMsgFailed(("Conflict! GCPhys=%RGp enmKind=%#x fNotInHm=%RTbool\n", GCPhys, pHandlerType->enmKind,
493 pHandlerType->fNotInHm));
494 }
495
496 PGM_UNLOCK(pVM);
497 return rc;
498 }
499
500 /*
501 * Validate the page handler parameters before registering the virtual VMX APIC-access page.
502 */
503 AssertReturn(pType->enmKind == PGMPHYSHANDLERKIND_ALL, VERR_INVALID_HANDLE);
504 AssertReturn(pType->fNotInHm, VERR_PGM_HANDLER_IPE_1);
505
506 /*
507 * Create and register a physical handler for the virtual VMX APIC-access page.
508 */
509 pHandler = pgmHandlerPhysicalExCreateWorker(pVM, pType, hType, 0 /*uUser*/, NULL /*pszDesc*/);
510 if (pHandler)
511 {
512 rc = pgmHandlerPhysicalRegisterVmxApicAccessPage(pVM, pHandler, GCPhys);
513 if (RT_SUCCESS(rc))
514 { /* likely */ }
515 else
516 pgmHandlerPhysicalExDestroy(pVM, pHandler);
517 }
518 else
519 rc = VERR_OUT_OF_RESOURCES;
520
521 PGM_UNLOCK(pVM);
522 return rc;
523}
524
525
526/**
527 * Sets ram range flags and attempts updating shadow PTs.
528 *
529 * @returns VBox status code.
530 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
531 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs could not be fully updated because
532 * the guest page is aliased and/or mapped by multiple PTs. FFs set.
533 * @param pVM The cross context VM structure.
534 * @param pCur The physical handler.
535 * @param pRam The RAM range.
536 * @param pvBitmap Dirty bitmap. Optional.
537 * @param offBitmap Dirty bitmap offset.
538 */
539static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
540 void *pvBitmap, uint32_t offBitmap)
541{
542 /*
543 * Iterate the guest ram pages updating the flags and flushing PT entries
544 * mapping the page.
545 */
546 bool fFlushTLBs = false;
547 int rc = VINF_SUCCESS;
548 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
549 const unsigned uState = pCurType->uState;
550 uint32_t cPages = pCur->cPages;
551 uint32_t i = (pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
552 for (;;)
553 {
554 PPGMPAGE pPage = &pRam->aPages[i];
555 AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
556 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));
557
558 /* Only do upgrades. */
559 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
560 {
561 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState, pCurType->fNotInHm);
562
563 const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
564 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
565 false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
566 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
567 rc = rc2;
568
569#ifdef VBOX_WITH_NATIVE_NEM
570 /* Tell NEM about the protection update. */
571 if (VM_IS_NEM_ENABLED(pVM))
572 {
573 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
574 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
575 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
576 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
577 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
578 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
579 }
580#endif
581 if (pvBitmap)
582 ASMBitSet(pvBitmap, offBitmap);
583 }
584
585 /* next */
586 if (--cPages == 0)
587 break;
588 i++;
589 offBitmap++;
590 }
591
592 if (fFlushTLBs)
593 {
594 PGM_INVL_ALL_VCPU_TLBS(pVM);
595 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
596 }
597 else
598 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));
599
600 return rc;
601}
602
603
604/**
605 * Deregister a physical page access handler.
606 *
607 * @returns VBox status code.
608 * @param pVM The cross context VM structure.
609 * @param pPhysHandler The handler to deregister (but not free).
610 */
611int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
612{
613 LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
614 pPhysHandler->Key, pPhysHandler->KeyLast, R3STRING(pPhysHandler->pszDesc)));
615
616 int rc = PGM_LOCK(pVM);
617 AssertRCReturn(rc, rc);
618
619 RTGCPHYS const GCPhys = pPhysHandler->Key;
620 AssertReturnStmt(GCPhys != NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_PGM_HANDLER_NOT_FOUND);
621
622 /*
623 * Remove the handler from the tree.
624 */
625
626 PPGMPHYSHANDLER pRemoved;
627 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
628 if (RT_SUCCESS(rc))
629 {
630 if (pRemoved == pPhysHandler)
631 {
632 /*
633 * Clear the page bits, notify the REM about this change and clear
634 * the cache.
635 */
636 pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
637 if (VM_IS_NEM_ENABLED(pVM))
638 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
639 pVM->pgm.s.idxLastPhysHandler = 0;
640
641 pPhysHandler->Key = NIL_RTGCPHYS;
642 pPhysHandler->KeyLast = NIL_RTGCPHYS;
643
644 PGM_UNLOCK(pVM);
645
646 return VINF_SUCCESS;
647 }
648
649 /*
650 * Both of the failure conditions here are considered internal processing
651 * errors because they can only be caused by race conditions or corruption.
652 * If we ever need to handle concurrent deregistration, we have to move
653 * the NIL_RTGCPHYS check inside the PGM lock.
654 */
655 pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pRemoved);
656 }
657
658 PGM_UNLOCK(pVM);
659
660 if (RT_FAILURE(rc))
661 AssertMsgFailed(("Didn't find range starting at %RGp in the tree! rc=%Rrc\n", GCPhys, rc));
662 else
663 AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
664 GCPhys, pRemoved, pPhysHandler));
665 return VERR_PGM_HANDLER_IPE_1;
666}
667
668
669/**
670 * Destroys (frees) a physical handler.
671 *
672 * The caller must deregister it before destroying it!
673 *
674 * @returns VBox status code.
675 * @param pVM The cross context VM structure.
676 * @param pHandler The handler to free. NULL if ignored.
677 */
678int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
679{
680 if (pHandler)
681 {
682 AssertPtr(pHandler);
683 AssertReturn(pHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
684
685 int rc = PGM_LOCK(pVM);
686 if (RT_SUCCESS(rc))
687 {
688 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pHandler);
689 PGM_UNLOCK(pVM);
690 }
691 return rc;
692 }
693 return VINF_SUCCESS;
694}
695
696
697/**
698 * Deregister a physical page access handler.
699 *
700 * @returns VBox status code.
701 * @param pVM The cross context VM structure.
702 * @param GCPhys Start physical address.
703 */
704VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
705{
706 AssertReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
707
708 /*
709 * Find the handler.
710 */
711 int rc = PGM_LOCK(pVM);
712 AssertRCReturn(rc, rc);
713
714 PPGMPHYSHANDLER pRemoved;
715 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
716 if (RT_SUCCESS(rc))
717 {
718 Assert(pRemoved->Key == GCPhys);
719 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
720 pRemoved->Key, pRemoved->KeyLast, R3STRING(pRemoved->pszDesc)));
721
722 /*
723 * Clear the page bits, notify the REM about this change and clear
724 * the cache.
725 */
726 pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
727 if (VM_IS_NEM_ENABLED(pVM))
728 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
729 pVM->pgm.s.idxLastPhysHandler = 0;
730
731 pRemoved->Key = NIL_RTGCPHYS;
732 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pRemoved);
733
734 PGM_UNLOCK(pVM);
735 return rc;
736 }
737
738 PGM_UNLOCK(pVM);
739
740 if (rc == VERR_NOT_FOUND)
741 {
742 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
743 rc = VERR_PGM_HANDLER_NOT_FOUND;
744 }
745 return rc;
746}
747
748
749/**
750 * Shared code with modify.
751 */
752static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
753{
754#ifdef VBOX_WITH_NATIVE_NEM
755 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
756 RTGCPHYS GCPhysStart = pCur->Key;
757 RTGCPHYS GCPhysLast = pCur->KeyLast;
758
759 /*
760 * Page align the range.
761 *
762 * Since we've reset (recalculated) the physical handler state of all pages
763 * we can make use of the page states to figure out whether a page should be
764 * included in the REM notification or not.
765 */
766 if ( (pCur->Key & GUEST_PAGE_OFFSET_MASK)
767 || ((pCur->KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
768 {
769 Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);
770
771 if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
772 {
773 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
774 if ( pPage
775 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
776 {
777 RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
778 if ( GCPhys > GCPhysLast
779 || GCPhys < GCPhysStart)
780 return;
781 GCPhysStart = GCPhys;
782 }
783 else
784 GCPhysStart &= X86_PTE_PAE_PG_MASK;
785 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
786 }
787
788 if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
789 {
790 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
791 if ( pPage
792 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
793 {
794 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
795 if ( GCPhys < GCPhysStart
796 || GCPhys > GCPhysLast)
797 return;
798 GCPhysLast = GCPhys;
799 }
800 else
801 GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
802 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
803 }
804 }
805
806 /*
807 * Tell NEM.
808 */
809 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhysStart);
810 RTGCPHYS const cb = GCPhysLast - GCPhysStart + 1;
811 uint8_t u2State = UINT8_MAX;
812 NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
813 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
814 if (u2State != UINT8_MAX && pRam)
815 pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
816 cb >> GUEST_PAGE_SHIFT, u2State);
817#else
818 RT_NOREF(pVM, pCur);
819#endif
820}
821
822
823/**
824 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
825 * edge pages.
826 */
827DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
828{
829 /*
830 * Look for other handlers.
831 */
832 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
833 for (;;)
834 {
835 PPGMPHYSHANDLER pCur;
836 int rc;
837 if (fAbove)
838 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
839 GCPhys, &pCur);
840 else
841 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrBelow(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
842 GCPhys, &pCur);
843 if (rc == VERR_NOT_FOUND)
844 break;
845 AssertRCBreak(rc);
846 if (((fAbove ? pCur->Key : pCur->KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
847 break;
848 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
849 uState = RT_MAX(uState, pCurType->uState);
850
851 /* next? */
852 RTGCPHYS GCPhysNext = fAbove
853 ? pCur->KeyLast + 1
854 : pCur->Key - 1;
855 if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
856 break;
857 GCPhys = GCPhysNext;
858 }
859
860 /*
861 * Update if we found something that is a higher priority state than the current.
862 * Note! The PGMPHYSHANDLER_F_NOT_IN_HM can be ignored here as it requires whole pages.
863 */
864 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
865 {
866 PPGMPAGE pPage;
867 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
868 if ( RT_SUCCESS(rc)
869 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
870 {
871 /* This should normally not be necessary. */
872 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, uState);
873 bool fFlushTLBs;
874 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
875 if (RT_SUCCESS(rc) && fFlushTLBs)
876 PGM_INVL_ALL_VCPU_TLBS(pVM);
877 else
878 AssertRC(rc);
879
880#ifdef VBOX_WITH_NATIVE_NEM
881 /* Tell NEM about the protection update. */
882 if (VM_IS_NEM_ENABLED(pVM))
883 {
884 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
885 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
886 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
887 PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
888 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
889 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
890 }
891#endif
892 }
893 else
894 AssertRC(rc);
895 }
896}
897
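/*
 * [Editor's illustrative note - not part of the original file.]
 * Example of the edge-page situation handled above: a WRITE handler ending at
 * 0xa7ff and a second WRITE handler starting at 0xa800 share the guest page
 * 0xa000-0xafff. When the first handler is deregistered and its pages reset,
 * this scan re-discovers the second handler on the shared page and keeps the
 * page in the WRITE state instead of dropping it to NONE.
 */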
898
899/**
900 * Resets an aliased page.
901 *
902 * @param pVM The cross context VM structure.
903 * @param pPage The page.
904 * @param GCPhysPage The page address in case it comes in handy.
905 * @param pRam The RAM range the page is associated with (for NEM
906 * notifications).
907 * @param fDoAccounting Whether to perform accounting. (Only set during
908 * reset where pgmR3PhysRamReset doesn't have the
909 * handler structure handy.)
910 * @param fFlushIemTlbs Whether to perform IEM TLB flushing or not. This
911 * can be cleared only if the caller does the flushing
912 * after calling this function.
913 */
914void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam,
915 bool fDoAccounting, bool fFlushIemTlbs)
916{
917 Assert( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
918 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
919 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
920#ifdef VBOX_WITH_NATIVE_NEM
921 RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
922#endif
923
924 /*
925 * Flush any shadow page table references *first*.
926 */
927 bool fFlushTLBs = false;
928 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
929 AssertLogRelRCReturnVoid(rc);
930#if defined(VBOX_VMM_TARGET_ARMV8)
931 AssertReleaseFailed();
932#else
933 HMFlushTlbOnAllVCpus(pVM);
934#endif
935
936 /*
937 * Make it an MMIO/Zero page.
938 */
939 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
940 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
941 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
942 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
943 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
944
945 /*
946 * Flush its TLB entry.
947 */
948 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
949 if (fFlushIemTlbs)
950 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_RESET_ALIAS);
951
952 /*
953 * Do accounting for pgmR3PhysRamReset.
954 */
955 if (fDoAccounting)
956 {
957 PPGMPHYSHANDLER pHandler;
958 rc = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pHandler);
959 if (RT_SUCCESS(rc))
960 {
961 Assert(pHandler->cAliasedPages > 0);
962 pHandler->cAliasedPages--;
963 }
964 else
965 AssertMsgFailed(("rc=%Rrc GCPhysPage=%RGp\n", rc, GCPhysPage));
966 }
967
968#ifdef VBOX_WITH_NATIVE_NEM
969 /*
970 * Tell NEM about the protection change.
971 */
972 if (VM_IS_NEM_ENABLED(pVM))
973 {
974 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
975 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev,
976# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
977 pVM->pgm.s.HCPhysZeroPg,
978# else
979 0,
980# endif
981 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
982 NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
983 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
984 }
985#else
986 RT_NOREF(pRam);
987#endif
988}
989
990
991/**
992 * Resets ram range flags.
993 *
994 * @param pVM The cross context VM structure.
995 * @param pCur The physical handler.
996 *
997 * @remark We don't start messing with the shadow page tables, as we've
998 * already got code in Trap0e which deals with out of sync handler
999 * flags (originally conceived for global pages).
1000 */
1001static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
1002{
1003 /*
1004 * Iterate the guest ram pages updating the state.
1005 */
1006 RTUINT cPages = pCur->cPages;
1007 RTGCPHYS GCPhys = pCur->Key;
1008 PPGMRAMRANGE pRamHint = NULL;
1009 for (;;)
1010 {
1011 PPGMPAGE pPage;
1012 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
1013 if (RT_SUCCESS(rc))
1014 {
1015 /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
1016 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
1017 bool fNemNotifiedAlready = false;
1018 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
1019 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
1020 {
1021 Assert(pCur->cAliasedPages > 0);
1022 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/, true /*fFlushIemTlbs*/);
1023 pCur->cAliasedPages--;
1024 fNemNotifiedAlready = true;
1025 }
1026#ifdef VBOX_STRICT
1027 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1028 AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
1029 ("%RGp %R[pgmpage]\n", GCPhys, pPage));
1030#endif
1031 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE, false);
1032
1033#ifdef VBOX_WITH_NATIVE_NEM
1034 /* Tell NEM about the protection change. */
1035 if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
1036 {
1037 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1038 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1039 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1040 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
1041 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1042 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1043 }
1044#endif
1045 RT_NOREF(fNemNotifiedAlready);
1046 }
1047 else
1048 AssertRC(rc);
1049
1050 /* next */
1051 if (--cPages == 0)
1052 break;
1053 GCPhys += GUEST_PAGE_SIZE;
1054 }
1055
1056 pCur->cAliasedPages = 0;
1057 pCur->cTmpOffPages = 0;
1058
1059 /*
1060 * Check for partial start and end pages.
1061 */
1062 if (pCur->Key & GUEST_PAGE_OFFSET_MASK)
1063 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Key - 1, false /* fAbove */, &pRamHint);
1064 if ((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
1065 pgmHandlerPhysicalRecalcPageState(pVM, pCur->KeyLast + 1, true /* fAbove */, &pRamHint);
1066}
1067
1068
1069#if 0 /* unused */
1070/**
1071 * Modify a physical page access handler.
1072 *
1073 * Modification can only be done to the range itself, not the type or anything else.
1074 *
1075 * @returns VBox status code.
1076 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
1077 * and a new registration must be performed!
1078 * @param pVM The cross context VM structure.
1079 * @param GCPhysCurrent Current location.
1080 * @param GCPhys New location.
1081 * @param GCPhysLast New last location.
1082 */
1083VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
1084{
1085 /*
1086 * Remove it.
1087 */
1088 int rc;
1089 PGM_LOCK_VOID(pVM);
1090 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
1091 if (pCur)
1092 {
1093 /*
1094 * Clear the ram flags. (We're gonna move or free it!)
1095 */
1096 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
1097 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1098 /** @todo pCurType validation. */
1099 bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
1100 && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;
1101
1102 /*
1103 * Validate the new range, modify and reinsert.
1104 */
1105 if (GCPhysLast >= GCPhys)
1106 {
1107 /*
1108 * We require the range to be within registered ram.
1109 * There is no apparent need to support ranges which cover more than one ram range.
1110 */
1111 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1112 if ( pRam
1113 && GCPhys <= pRam->GCPhysLast
1114 && GCPhysLast >= pRam->GCPhys)
1115 {
1116 pCur->Core.Key = GCPhys;
1117 pCur->Core.KeyLast = GCPhysLast;
1118 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;
1119
1120 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
1121 {
1122 RTGCPHYS const cb = GCPhysLast - GCPhys + 1;
1123 PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;
1124
1125 /*
1126 * Set ram flags, flush shadow PT entries and finally tell REM about this.
1127 */
1128 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);
1129
1130 /** @todo NEM: not sure we need this notification... */
1131 NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);
1132
1133 PGM_UNLOCK(pVM);
1134
1135 PGM_INVL_ALL_VCPU_TLBS(pVM);
1136 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
1137 GCPhysCurrent, GCPhys, GCPhysLast));
1138 return VINF_SUCCESS;
1139 }
1140
1141 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
1142 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
1143 }
1144 else
1145 {
1146 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
1147 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
1148 }
1149 }
1150 else
1151 {
1152 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
1153 rc = VERR_INVALID_PARAMETER;
1154 }
1155
1156 /*
1157 * Invalid new location, flush the cache and free it.
1158 * We've only gotta notify REM and free the memory.
1159 */
1160 if (VM_IS_NEM_ENABLED(pVM))
1161 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
1162 pVM->pgm.s.pLastPhysHandlerR0 = 0;
1163 pVM->pgm.s.pLastPhysHandlerR3 = 0;
1164 PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
1165 MMHyperFree(pVM, pCur);
1166 }
1167 else
1168 {
1169 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
1170 rc = VERR_PGM_HANDLER_NOT_FOUND;
1171 }
1172
1173 PGM_UNLOCK(pVM);
1174 return rc;
1175}
1176#endif /* unused */
1177
1178
1179/**
1180 * Changes the user callback arguments associated with a physical access handler.
1181 *
1182 * @returns VBox status code.
1183 * @param pVM The cross context VM structure.
1184 * @param GCPhys Start physical address of the handler.
1185 * @param uUser User argument to the handlers.
1186 */
1187VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
1188{
1189 /*
1190 * Find the handler and make the change.
1191 */
1192 int rc = PGM_LOCK(pVM);
1193 AssertRCReturn(rc, rc);
1194
1195 PPGMPHYSHANDLER pCur;
1196 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1197 if (RT_SUCCESS(rc))
1198 {
1199 Assert(pCur->Key == GCPhys);
1200 pCur->uUser = uUser;
1201 }
1202 else if (rc == VERR_NOT_FOUND)
1203 {
1204 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1205 rc = VERR_PGM_HANDLER_NOT_FOUND;
1206 }
1207
1208 PGM_UNLOCK(pVM);
1209 return rc;
1210}
1211
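/*
 * [Editor's illustrative sketch - not part of the original file.]
 * A caller that needs its handler callbacks to receive a different opaque
 * uUser value (for instance after relocating its state) can retarget the
 * handler in place; GCPhysRegion and pThisDeviceNew are placeholders.
 */
#if 0 /* illustration only */
    int rc = PGMHandlerPhysicalChangeUserArg(pVM, GCPhysRegion, (uint64_t)(uintptr_t)pThisDeviceNew);
    AssertRC(rc);
#endif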
1212#if 0 /* unused */
1213
1214/**
1215 * Splits a physical access handler in two.
1216 *
1217 * @returns VBox status code.
1218 * @param pVM The cross context VM structure.
1219 * @param GCPhys Start physical address of the handler.
1220 * @param GCPhysSplit The split address.
1221 */
1222VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
1223{
1224 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
1225
1226 /*
1227 * Do the allocation without owning the lock.
1228 */
1229 PPGMPHYSHANDLER pNew;
1230 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
1231 if (RT_FAILURE(rc))
1232 return rc;
1233
1234 /*
1235 * Get the handler.
1236 */
1237 PGM_LOCK_VOID(pVM);
1238 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1239 if (RT_LIKELY(pCur))
1240 {
1241 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
1242 {
1243 /*
1244 * Create new handler node for the 2nd half.
1245 */
1246 *pNew = *pCur;
1247 pNew->Core.Key = GCPhysSplit;
1248 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1249
1250 pCur->Core.KeyLast = GCPhysSplit - 1;
1251 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1252
1253 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
1254 {
1255 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
1256 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
1257 PGM_UNLOCK(pVM);
1258 return VINF_SUCCESS;
1259 }
1260 AssertMsgFailed(("whu?\n"));
1261 rc = VERR_PGM_PHYS_HANDLER_IPE;
1262 }
1263 else
1264 {
1265 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
1266 rc = VERR_INVALID_PARAMETER;
1267 }
1268 }
1269 else
1270 {
1271 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1272 rc = VERR_PGM_HANDLER_NOT_FOUND;
1273 }
1274 PGM_UNLOCK(pVM);
1275 MMHyperFree(pVM, pNew);
1276 return rc;
1277}
1278
1279
1280/**
1281 * Joins up two adjacent physical access handlers which have the same callbacks.
1282 *
1283 * @returns VBox status code.
1284 * @param pVM The cross context VM structure.
1285 * @param GCPhys1 Start physical address of the first handler.
1286 * @param GCPhys2 Start physical address of the second handler.
1287 */
1288VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
1289{
1290 /*
1291 * Get the handlers.
1292 */
1293 int rc;
1294 PGM_LOCK_VOID(pVM);
1295 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
1296 if (RT_LIKELY(pCur1))
1297 {
1298 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1299 if (RT_LIKELY(pCur2))
1300 {
1301 /*
1302 * Make sure that they are adjacent, and that they've got the same callbacks.
1303 */
1304 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
1305 {
1306 if (RT_LIKELY(pCur1->hType == pCur2->hType))
1307 {
1308 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1309 if (RT_LIKELY(pCur3 == pCur2))
1310 {
1311 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
1312 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1313 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
1314 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1315 pVM->pgm.s.pLastPhysHandlerR0 = 0;
1316 pVM->pgm.s.pLastPhysHandlerR3 = 0;
1317 PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
1318 MMHyperFree(pVM, pCur2);
1319 PGM_UNLOCK(pVM);
1320 return VINF_SUCCESS;
1321 }
1322
1323 Assert(pCur3 == pCur2);
1324 rc = VERR_PGM_PHYS_HANDLER_IPE;
1325 }
1326 else
1327 {
1328 AssertMsgFailed(("mismatching handlers\n"));
1329 rc = VERR_ACCESS_DENIED;
1330 }
1331 }
1332 else
1333 {
1334 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
1335 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1336 rc = VERR_INVALID_PARAMETER;
1337 }
1338 }
1339 else
1340 {
1341 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
1342 rc = VERR_PGM_HANDLER_NOT_FOUND;
1343 }
1344 }
1345 else
1346 {
1347 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
1348 rc = VERR_PGM_HANDLER_NOT_FOUND;
1349 }
1350 PGM_UNLOCK(pVM);
1351 return rc;
1352
1353}
1354
1355#endif /* unused */
1356
1357/**
1358 * Resets any modifications to individual pages in a physical page access
1359 * handler region.
1360 *
1361 * This is used in pair with PGMHandlerPhysicalPageTempOff(),
1362 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
1363 *
1364 * @returns VBox status code.
1365 * @param pVM The cross context VM structure.
1366 * @param GCPhys The start address of the handler regions, i.e. what you
1367 * passed to PGMR3HandlerPhysicalRegister(),
1368 * PGMHandlerPhysicalRegisterEx() or
1369 * PGMHandlerPhysicalModify().
1370 */
1371VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
1372{
1373 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
1374 int rc = PGM_LOCK(pVM);
1375 AssertRCReturn(rc, rc);
1376
1377 /*
1378 * Find the handler.
1379 */
1380 PPGMPHYSHANDLER pCur;
1381 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1382 if (RT_SUCCESS(rc))
1383 {
1384 Assert(pCur->Key == GCPhys);
1385
1386 /*
1387 * Validate kind.
1388 */
1389 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1390 switch (pCurType->enmKind)
1391 {
1392 case PGMPHYSHANDLERKIND_WRITE:
1393 case PGMPHYSHANDLERKIND_ALL:
1394 case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
1395 {
1396 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
1397 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1398 Assert(pRam);
1399 Assert(pRam->GCPhys <= pCur->Key);
1400 Assert(pRam->GCPhysLast >= pCur->KeyLast);
1401
1402 if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
1403 {
1404 /*
1405 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
1406 * This could probably be optimized a bit wrt flushing, but I'm too lazy
1407 * to do that now...
1408 */
1409 if (pCur->cAliasedPages)
1410 {
1411 PPGMPAGE pPage = &pRam->aPages[(pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
1412 RTGCPHYS GCPhysPage = pCur->Key;
1413 uint32_t cLeft = pCur->cPages;
1414 bool fFlushIemTlb = false;
1415 while (cLeft-- > 0)
1416 {
1417 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
1418 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
1419 {
1420 fFlushIemTlb |= PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO;
1421 Assert(pCur->cAliasedPages > 0);
1422 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
1423 false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
1424 --pCur->cAliasedPages;
1425#ifndef VBOX_STRICT
1426 if (pCur->cAliasedPages == 0)
1427 break;
1428#endif
1429 }
1430 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
1431 GCPhysPage += GUEST_PAGE_SIZE;
1432 pPage++;
1433 }
1434 Assert(pCur->cAliasedPages == 0);
1435
1436 /*
1437 * Flush IEM TLBs in case they contain any references to aliased pages.
1438 * This is only necessary for MMIO2 aliases.
1439 */
1440 if (fFlushIemTlb)
1441 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_HANDLER_RESET);
1442 }
1443 }
1444 else if (pCur->cTmpOffPages > 0)
1445 {
1446 /*
1447 * Set the flags and flush shadow PT entries.
1448 */
1449 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
1450 }
1451
1452 pCur->cAliasedPages = 0;
1453 pCur->cTmpOffPages = 0;
1454
1455 rc = VINF_SUCCESS;
1456 break;
1457 }
1458
1459 /*
1460 * Invalid.
1461 */
1462 default:
1463 AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
1464 rc = VERR_PGM_PHYS_HANDLER_IPE;
1465 break;
1466 }
1467 }
1468 else if (rc == VERR_NOT_FOUND)
1469 {
1470 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1471 rc = VERR_PGM_HANDLER_NOT_FOUND;
1472 }
1473
1474 PGM_UNLOCK(pVM);
1475 return rc;
1476}
1477
1478
1479/**
1480 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
1481 * tracking.
1482 *
1483 * @returns VBox status code.
1484 * @param pVM The cross context VM structure.
1485 * @param GCPhys The start address of the handler region.
1486 * @param pvBitmap Dirty bitmap. Caller has cleared this already, only
1487 * dirty bits will be set. Caller also made sure it's big
1488 * enough.
1489 * @param offBitmap Dirty bitmap offset.
1490 * @remarks Caller must own the PGM critical section.
1491 */
1492DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
1493{
1494 LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
1495 PGM_LOCK_ASSERT_OWNER(pVM);
1496
1497 /*
1498 * Find the handler.
1499 */
1500 PPGMPHYSHANDLER pCur;
1501 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1502 if (RT_SUCCESS(rc))
1503 {
1504 Assert(pCur->Key == GCPhys);
1505
1506 /*
1507 * Validate kind.
1508 */
1509 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1510 if ( pCurType
1511 && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
1512 {
1513 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));
1514
1515#ifdef VBOX_STRICT
1516 PPGMRAMRANGE const pRamStrict = pgmPhysGetRange(pVM, GCPhys);
1517 Assert(pRamStrict && pRamStrict->GCPhys <= pCur->Key);
1518 Assert(pRamStrict && pRamStrict->GCPhysLast >= pCur->KeyLast);
1519#endif
1520
1521 /*
1522 * Set the flags and flush shadow PT entries.
1523 */
1524 if (pCur->cTmpOffPages > 0)
1525 {
1526 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
1527 if (pRam) /* paranoia */
1528 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
1529 else
1530 AssertFailed();
1531 pCur->cTmpOffPages = 0;
1532 }
1533 else
1534 rc = VINF_SUCCESS;
1535 }
1536 else
1537 {
1538 AssertFailed();
1539 rc = VERR_WRONG_TYPE;
1540 }
1541 }
1542 else if (rc == VERR_NOT_FOUND)
1543 {
1544 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1545 rc = VERR_PGM_HANDLER_NOT_FOUND;
1546 }
1547
1548 return rc;
1549}
1550
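/*
 * [Editor's illustrative sketch - not part of the original file.]
 * A caller of the bitmap variant above (the MMIO2 dirty-page tracking path)
 * could consume the result roughly like this; cPages, offBitmap, pvBitmap and
 * exampleSyncDirtyPage are placeholders for the illustration.
 */
#if 0 /* illustration only */
    /* The caller owns the PGM critical section here, as required by the function above. */
    int rc = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, GCPhysMmio2, pvBitmap, offBitmap);
    if (RT_SUCCESS(rc))
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
            if (ASMBitTest(pvBitmap, offBitmap + iPage))    /* set when the page was written since the last reset */
                exampleSyncDirtyPage(GCPhysMmio2 + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
#endif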
1551
1552/**
1553 * Temporarily turns off the access monitoring of a page within a monitored
1554 * physical write/all page access handler region.
1555 *
1556 * Use this when no further \#PFs are required for that page. Be aware that
1557 * a page directory sync might reset the flags, and turn on access monitoring
1558 * for the page.
1559 *
1560 * The caller must do required page table modifications.
1561 *
1562 * @returns VBox status code.
1563 * @param pVM The cross context VM structure.
1564 * @param GCPhys The start address of the access handler. This
1565 * must be a fully page aligned range or we risk
1566 * messing up other handlers installed for the
1567 * start and end pages.
1568 * @param GCPhysPage The physical address of the page to turn off
1569 * access monitoring for.
1570 */
1571VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
1572{
1573 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
1574 int rc = PGM_LOCK(pVM);
1575 AssertRCReturn(rc, rc);
1576
1577 /*
1578 * Validate the range.
1579 */
1580 PPGMPHYSHANDLER pCur;
1581 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1582 if (RT_SUCCESS(rc))
1583 {
1584 Assert(pCur->Key == GCPhys);
1585 if (RT_LIKELY( GCPhysPage >= pCur->Key
1586 && GCPhysPage <= pCur->KeyLast))
1587 {
1588 Assert(!(pCur->Key & GUEST_PAGE_OFFSET_MASK));
1589 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
1590
1591 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1592 AssertReturnStmt( pCurType
1593 && ( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1594 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
1595 PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1596
1597 /*
1598 * Change the page status.
1599 */
1600 PPGMPAGE pPage;
1601 PPGMRAMRANGE pRam;
1602 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1603 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1604 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1605 {
1606 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1607 pCur->cTmpOffPages++;
1608
1609#ifdef VBOX_WITH_NATIVE_NEM
1610 /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
1611 if (VM_IS_NEM_ENABLED(pVM))
1612 {
1613 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1614 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1615 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
1616 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1617 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1618 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1619 }
1620#endif
1621 }
1622 PGM_UNLOCK(pVM);
1623 return VINF_SUCCESS;
1624 }
1625 PGM_UNLOCK(pVM);
1626 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1627 return VERR_INVALID_PARAMETER;
1628 }
1629 PGM_UNLOCK(pVM);
1630
1631 if (rc == VERR_NOT_FOUND)
1632 {
1633 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1634 return VERR_PGM_HANDLER_NOT_FOUND;
1635 }
1636 return rc;
1637}
1638
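/*
 * [Editor's illustrative sketch - not part of the original file.]
 * PGMHandlerPhysicalPageTempOff above pairs with PGMHandlerPhysicalReset: a
 * device that only needs to see the first write to a page can stop further
 * faults on it and later re-arm the whole range. GCPhysHandlerStart is a
 * placeholder for the start address used at registration time.
 */
#if 0 /* illustration only */
    /* In the write handler: first write to this page seen, stop trapping it. */
    int rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysHandlerStart, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
    AssertRC(rc);
    /* ... later, when the device wants dirty tracking again, re-enable monitoring of every page in the range: */
    rc = PGMHandlerPhysicalReset(pVM, GCPhysHandlerStart);
    AssertRC(rc);
#endif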
1639
1640#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1641/**
1642 * Resolves an MMIO2 page.
1643 *
1644 * Caller has taken the PGM lock.
1645 *
1646 * @returns Pointer to the page if valid, NULL otherwise
1647 * @param pVM The cross context VM structure.
1648 * @param pDevIns The device owning it.
1649 * @param hMmio2 The MMIO2 region.
1650 * @param offMmio2Page The offset into the region.
1651 */
1652static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
1653{
1654 /* Only works if the handle is in the handle table! */
1655 AssertReturn(hMmio2 != 0, NULL);
1656 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
1657 AssertReturn(hMmio2 <= cMmio2Ranges, NULL);
1658 AssertCompile(RT_ELEMENTS(pVM->pgm.s.apMmio2RamRanges) == RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
1659# ifdef IN_RING0
1660 AssertCompile(RT_ELEMENTS(pVM->pgmr0.s.apMmio2RamRanges) == RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
1661 AssertCompile(RT_ELEMENTS(pVM->pgmr0.s.acMmio2RangePages) == RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
1662# endif
1663 uint32_t const idxFirst = hMmio2 - 1U;
1664
1665 /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
1666 AssertReturn(pVM->pgm.s.aMmio2Ranges[idxFirst].fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
1667# ifdef IN_RING0
1668 AssertReturn(pVM->pgmr0.s.ahMmio2MapObjs[idxFirst] != NIL_RTR0MEMOBJ, NULL); /* Only the first chunk has a backing object. */
1669# endif
1670
1671 /* Loop thru the sub-ranges till we find the one covering offMmio2. */
1672 for (uint32_t idx = idxFirst; idx < cMmio2Ranges; idx++)
1673 {
1674# ifdef IN_RING3
1675 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns, NULL);
1676# else
1677 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns->pDevInsForR3, NULL);
1678# endif
1679
1680 /* Does it match the offset? */
1681 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx];
1682 AssertReturn(pRamRange, NULL);
1683# ifdef IN_RING3
1684 RTGCPHYS const cbRange = RT_MIN(pRamRange->cb, pVM->pgm.s.aMmio2Ranges[idx].cbReal);
1685# else
1686 RTGCPHYS const cbRange = RT_MIN(pRamRange->cb, (RTGCPHYS)pVM->pgmr0.s.acMmio2RangePages[idx] << GUEST_PAGE_SHIFT);
1687# endif
1688 if (offMmio2Page < cbRange)
1689 return &pRamRange->aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
1690
1691 /* Advance. */
1692 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
1693 offMmio2Page -= cbRange;
1694 }
1695 AssertFailed();
1696 return NULL;
1697}
1698#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
1699
1700
1701/**
1702 * Replaces an MMIO page with an MMIO2 page.
1703 *
1704 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1705 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1706 * backing, the caller must provide a replacement page. For various reasons the
1707 * replacement page must be an MMIO2 page.
1708 *
1709 * The caller must do the required page table modifications. You can get away
1710 * without making any modifications since it's an MMIO page; the cost is an extra
1711 * \#PF which will then resync the page.
1712 *
1713 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1714 *
1715 * The caller may still get handler callbacks even after this call and must be
1716 * able to deal correctly with such calls. The reason for these callbacks is
1717 * either that we're executing in the recompiler (which doesn't know about this
1718 * arrangement) or that we've been restored from saved state (where we won't
1719 * save the change).
1720 *
1721 * @returns VBox status code.
1722 * @param pVM The cross context VM structure.
1723 * @param GCPhys The start address of the access handler. This
1724 * must be a fully page aligned range or we risk
1725 * messing up other handlers installed for the
1726 * start and end pages.
1727 * @param GCPhysPage The physical address of the page to turn off
1728 * access monitoring for and replace with the MMIO2
1729 * page.
1730 * @param pDevIns The device instance owning @a hMmio2.
1731 * @param hMmio2 Handle to the MMIO2 region containing the page
1732 * to remap into the MMIO page at @a GCPhys.
1733 * @param offMmio2PageRemap The offset into @a hMmio2 of the MMIO2 page that
1734 * should serve as backing memory.
1735 *
1736 * @remark May cause a page pool flush if used on a page that is already
1737 * aliased.
1738 *
1739 * @note This trick only works reliably if the two pages are never ever
1740 * mapped in the same page table. If they are the page pool code will
1741 * be confused should either of them be flushed. See the special case
1742 * of zero page aliasing mentioned in #3170.
1743 *
1744 */
1745VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1746 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1747{
1748#ifdef VBOX_WITH_ONLY_PGM_NEM_MODE
1749 RT_NOREF(pVM, GCPhys, GCPhysPage, pDevIns, hMmio2, offMmio2PageRemap);
1750 AssertFailedReturn(VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1751#else
1752# ifdef VBOX_WITH_PGM_NEM_MODE
1753 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1754# endif
1755 int rc = PGM_LOCK(pVM);
1756 AssertRCReturn(rc, rc);
1757
1758 /*
1759 * Resolve the MMIO2 reference.
1760 */
1761 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1762 if (RT_LIKELY(pPageRemap))
1763 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1764 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1765 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1766 else
1767 {
1768 PGM_UNLOCK(pVM);
1769 return VERR_OUT_OF_RANGE;
1770 }
1771
1772 /*
1773 * Lookup and validate the range.
1774 */
1775 PPGMPHYSHANDLER pCur;
1776 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1777 if (RT_SUCCESS(rc))
1778 {
1779 Assert(pCur->Key == GCPhys);
1780 if (RT_LIKELY( GCPhysPage >= pCur->Key
1781 && GCPhysPage <= pCur->KeyLast))
1782 {
1783 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1784 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1785 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1786 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1787 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1788
1789 /*
1790 * Validate the page.
1791 */
1792 PPGMPAGE pPage;
1793 PPGMRAMRANGE pRam;
1794 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1795 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1796 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1797 {
1798 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1799 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1800 VERR_PGM_PHYS_NOT_MMIO2);
1801 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1802 {
1803 PGM_UNLOCK(pVM);
1804 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1805 }
1806
1807 /*
1808 * The page is already mapped as some other page, reset it
1809 * to an MMIO/ZERO page before doing the new mapping.
1810 */
1811 Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1812 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1813 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
1814 false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
1815 pCur->cAliasedPages--;
1816
1817 /* Since this may be present in the TLB and now be wrong, invalidate
1818 the guest physical address part of the IEM TLBs. Note that we do
1819 this here as we will not invalidate it further down. */
1820 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MMIO2_ALIAS);
1821 }
1822 Assert(PGM_PAGE_IS_ZERO(pPage));
1823
1824 /*
1825 * Do the actual remapping here.
1826 * This page now serves as an alias for the backing memory specified.
1827 */
1828 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
1829 GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
1830 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1831 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1832 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1833 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1834 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1835 pCur->cAliasedPages++;
1836 Assert(pCur->cAliasedPages <= pCur->cPages);
1837
1838 /*
1839 * Flush its TLB entry.
1840 *
1841 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here to conserve
1842 * all the other IEM TLB entries. When this one is kicked out and
1843 * reloaded, it will be using the MMIO2 alias, but till then we'll
1844 * continue doing MMIO.
1845 */
1846 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1847 /** @todo Do some performance checks of calling
1848 * IEMTlbInvalidateAllPhysicalAllCpus when in IEM mode, to see if it
1849 * actually makes sense or not. Screen updates are typically massive
1850 * and important when this kind of aliasing is used, so it may pay off... */
1851
1852# ifdef VBOX_WITH_NATIVE_NEM
1853 /* Tell NEM about the backing and protection change. */
1854 if (VM_IS_NEM_ENABLED(pVM))
1855 {
1856 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1857 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1858 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1859 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1860 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1861 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1862 }
1863# endif
1864 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
1865 PGM_UNLOCK(pVM);
1866 return VINF_SUCCESS;
1867 }
1868
1869 PGM_UNLOCK(pVM);
1870 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1871 return VERR_INVALID_PARAMETER;
1872 }
1873
1874 PGM_UNLOCK(pVM);
1875 if (rc == VERR_NOT_FOUND)
1876 {
1877 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1878 return VERR_PGM_HANDLER_NOT_FOUND;
1879 }
1880 return rc;
1881#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
1882}
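

/*
 * Illustrative usage sketch (not built): remapping one page of a device's
 * MMIO2 region over an MMIO page inside its handler range, along the lines of
 * the IOMMMIOMapMMIO2Page use case mentioned above.  The wrapper function and
 * its parameters are hypothetical; only the PGM calls are real.
 */
#if 0
static int exampleAliasMmio2Page(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
                                 RTGCPHYS GCPhysMmioStart, RTGCPHYS GCPhysPage, RTGCPHYS offMmio2Page)
{
    /* Point GCPhysPage at the MMIO2 backing so guest accesses no longer trap. */
    int rc = PGMHandlerPhysicalPageAliasMmio2(pVM, GCPhysMmioStart, GCPhysPage, pDevIns, hMmio2, offMmio2Page);
    if (rc == VINF_PGM_HANDLER_ALREADY_ALIASED)
        rc = VINF_SUCCESS; /* Already aliased to this very MMIO2 page. */

    /* To go back to trapping MMIO for the whole range, the caller would later
       use PGMHandlerPhysicalReset(pVM, GCPhysMmioStart). */
    return rc;
}
#endif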
1883
1884
1885/**
1886 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
1887 *
1888 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
1889 * need to be a known MMIO2 page and that only shadow paging may access the
1890 * page. The latter distinction is important because the only use for this
1891 * feature is for mapping the special APIC access page that VT-x uses to detect
1892 * APIC MMIO operations; the page is shared between all guest CPUs and actually
1893 * not written to. At least at the moment.
1894 *
1895 * The caller must do the required page table modifications. You can get away
1896 * without making any modifications since it's an MMIO page; the cost is an extra
1897 * \#PF which will then resync the page.
1898 *
1899 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1900 *
1901 *
1902 * @returns VBox status code.
1903 * @param pVM The cross context VM structure.
1904 * @param GCPhys The start address of the access handler. This
1905 * must be a fully page aligned range or we risk
1906 * messing up other handlers installed for the
1907 * start and end pages.
1908 * @param GCPhysPage The physical address of the page to turn off
1909 * access monitoring for.
1910 * @param HCPhysPageRemap The physical address of the HC page that
1911 * serves as backing memory.
1912 *
1913 * @remark May cause a page pool flush if used on a page that is already
1914 * aliased.
1915 */
1916VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1917{
1918#ifdef VBOX_WITH_ONLY_PGM_NEM_MODE
1919 RT_NOREF(pVM, GCPhys, GCPhysPage, HCPhysPageRemap);
1920 AssertFailedReturn(VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1921#else
1922/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1923# ifdef VBOX_WITH_PGM_NEM_MODE
1924 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1925# endif
1926 int rc = PGM_LOCK(pVM);
1927 AssertRCReturn(rc, rc);
1928
1929 /*
1930 * Lookup and validate the range.
1931 */
1932 PPGMPHYSHANDLER pCur;
1933 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1934 if (RT_SUCCESS(rc))
1935 {
1936 Assert(pCur->Key == GCPhys);
1937 if (RT_LIKELY( GCPhysPage >= pCur->Key
1938 && GCPhysPage <= pCur->KeyLast))
1939 {
1940 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1941 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1942 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1943 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1944 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1945
1946 /*
1947 * Get and validate the pages.
1948 */
1949 PPGMPAGE pPage = NULL;
1950# ifdef VBOX_WITH_NATIVE_NEM
1951 PPGMRAMRANGE pRam = NULL;
1952 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1953# else
1954 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1955# endif
1956 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1957 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1958 {
1959 PGM_UNLOCK(pVM);
1960 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
1961 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1962 VERR_PGM_PHYS_NOT_MMIO2);
1963 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1964 }
1965 Assert(PGM_PAGE_IS_ZERO(pPage));
1966
1967 /*
1968 * Do the actual remapping here.
1969 * This page now serves as an alias for the backing memory
1970 * specified as far as shadow paging is concerned.
1971 */
1972 LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
1973 GCPhysPage, pPage, HCPhysPageRemap));
1974 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1975 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
1976 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1977 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1978 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1979 pCur->cAliasedPages++;
1980 Assert(pCur->cAliasedPages <= pCur->cPages);
1981
1982 /*
1983 * Flush its TLB entry.
1984 *
1985 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here as special
1986 * aliased MMIO pages are handled like MMIO by the IEM TLB.
1987 */
1988 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1989
1990# ifdef VBOX_WITH_NATIVE_NEM
1991 /* Tell NEM about the backing and protection change. */
1992 if (VM_IS_NEM_ENABLED(pVM))
1993 {
1994 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1995 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1996 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1997 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
1998 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
1999 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
2000 }
2001# endif
2002 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
2003 PGM_UNLOCK(pVM);
2004 return VINF_SUCCESS;
2005 }
2006 PGM_UNLOCK(pVM);
2007 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
2008 return VERR_INVALID_PARAMETER;
2009 }
2010 PGM_UNLOCK(pVM);
2011
2012 if (rc == VERR_NOT_FOUND)
2013 {
2014 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
2015 return VERR_PGM_HANDLER_NOT_FOUND;
2016 }
2017 return rc;
2018#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
2019}
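

/*
 * Illustrative usage sketch (not built): backing an MMIO page with a host
 * physical page for shadow paging only, as described above for the VT-x APIC
 * access page.  GCPhysApicBase and HCPhysAccessPage are hypothetical
 * placeholders supplied by the caller; only the PGM call itself is real.
 */
#if 0
static int exampleAliasApicAccessPage(PVMCC pVM, RTGCPHYS GCPhysApicBase, RTHCPHYS HCPhysAccessPage)
{
    /* The handler registered at GCPhysApicBase must be a page aligned MMIO
       handler, and the page being aliased must lie within its range. */
    int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicBase, GCPhysApicBase, HCPhysAccessPage);
    if (rc == VINF_PGM_HANDLER_ALREADY_ALIASED)
        rc = VINF_SUCCESS;
    return rc;
}
#endif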
2020
2021
2022/**
2023 * Checks if a physical range is handled
2024 *
2025 * @returns boolean
2026 * @param pVM The cross context VM structure.
2027 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
2028 * @remarks Caller must take the PGM lock...
2029 * @thread EMT.
2030 */
2031VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
2032{
2033 /*
2034 * Find the handler.
2035 */
2036 PGM_LOCK_VOID(pVM);
2037 PPGMPHYSHANDLER pCur;
2038 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2039 if (RT_SUCCESS(rc))
2040 {
2041#ifdef VBOX_STRICT
2042 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2043 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2044 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
2045 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
2046 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
2047#endif
2048 PGM_UNLOCK(pVM);
2049 return true;
2050 }
2051 PGM_UNLOCK(pVM);
2052 return false;
2053}
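

/*
 * Illustrative usage sketch (not built): asking whether a guest physical
 * address is covered by any physical access handler before treating it as
 * plain RAM.  The logging is hypothetical; only the query itself is real.
 */
#if 0
static bool exampleIsAddressHandled(PVMCC pVM, RTGCPHYS GCPhys)
{
    bool const fHandled = PGMHandlerPhysicalIsRegistered(pVM, GCPhys);
    Log(("GCPhys=%RGp %s covered by a physical access handler\n", GCPhys, fHandled ? "is" : "is not"));
    return fHandled;
}
#endif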
2054
2055
2056/**
2057 * Checks if it's a disabled all access handler or write access handler at the
2058 * given address.
2059 *
2060 * @returns true if it's an all access handler, false if it's a write access
2061 * handler.
2062 * @param pVM The cross context VM structure.
2063 * @param GCPhys The address of the page with a disabled handler.
2064 *
2065 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
2066 */
2067bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
2068{
2069 PGM_LOCK_VOID(pVM);
2070 PPGMPHYSHANDLER pCur;
2071 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2072 AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), true);
2073
2074 /* Only whole pages can be disabled. */
2075 Assert( pCur->Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
2076 && pCur->KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));
2077
2078 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2079 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
2080 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
2081 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
2082 bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
2083 PGM_UNLOCK(pVM);
2084 return fRet;
2085}
2086
2087#ifdef VBOX_STRICT
2088
2089/**
2090 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
2091 * and its AVL enumerators.
2092 */
2093typedef struct PGMAHAFIS
2094{
2095 /** The current physical address. */
2096 RTGCPHYS GCPhys;
2097 /** Number of errors. */
2098 unsigned cErrors;
2099 /** Pointer to the VM. */
2100 PVM pVM;
2101} PGMAHAFIS, *PPGMAHAFIS;
2102
2103
2104/**
2105 * Asserts that the handlers+guest-page-tables == ramrange-flags and
2106 * that the physical addresses associated with virtual handlers are correct.
2107 *
2108 * @returns Number of mismatches.
2109 * @param pVM The cross context VM structure.
2110 */
2111VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
2112{
2113 PGMAHAFIS State;
2114 State.GCPhys = 0;
2115 State.cErrors = 0;
2116 State.pVM = pVM;
2117
2118 PGM_LOCK_ASSERT_OWNER(pVM);
2119
2120 /*
2121 * Check the RAM flags against the handlers.
2122 */
2123 PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
2124 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries,
2125 RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
2126 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
2127 {
2128 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
2129 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
2130 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
2131 AssertContinue(pRam);
2132 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
2133 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2134 {
2135 PGMPAGE const *pPage = &pRam->aPages[iPage];
2136 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
2137 {
2138 State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);
2139
2140 /*
2141 * Physical first - calculate the state based on the handlers
2142 * active on the page, then compare.
2143 */
2144 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
2145 {
2146 /* the first */
2147 PPGMPHYSHANDLER pPhys;
2148 int rc = pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, State.GCPhys, &pPhys);
2149 if (rc == VERR_NOT_FOUND)
2150 {
2151 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2152 State.GCPhys, &pPhys);
2153 if (RT_SUCCESS(rc))
2154 {
2155 Assert(pPhys->Key >= State.GCPhys);
2156 if (pPhys->Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
2157 pPhys = NULL;
2158 }
2159 else
2160 AssertLogRelMsgReturn(rc == VERR_NOT_FOUND, ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
2161 }
2162 else
2163 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
2164
2165 if (pPhys)
2166 {
2167 PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
2168 unsigned uState = pPhysType->uState;
2169 bool const fNotInHm = pPhysType->fNotInHm; /* whole pages, so no need to accumulate sub-page configs. */
2170
2171 /* more? */
2172 while (pPhys->KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
2173 {
2174 PPGMPHYSHANDLER pPhys2;
2175 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2176 pPhys->KeyLast + 1, &pPhys2);
2177 if (rc == VERR_NOT_FOUND)
2178 break;
2179 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc KeyLast+1=%RGp\n", rc, pPhys->KeyLast + 1), 999);
2180 if (pPhys2->Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
2181 break;
2182 PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
2183 uState = RT_MAX(uState, pPhysType2->uState);
2184 pPhys = pPhys2;
2185 }
2186
2187 /* compare.*/
2188 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
2189 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
2190 {
2191 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
2192 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
2193 State.cErrors++;
2194 }
2195 AssertMsgStmt(PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) == fNotInHm,
2196 ("ram range vs phys handler flags mismatch. GCPhys=%RGp fNotInHm=%d, %d %s\n",
2197 State.GCPhys, PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage), fNotInHm, pPhysType->pszDesc),
2198 State.cErrors++);
2199 }
2200 else
2201 {
2202 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
2203 State.cErrors++;
2204 }
2205 }
2206 }
2207 } /* foreach page in ram range. */
2208 } /* foreach ram range. */
2209
2210 /*
2211 * Do the reverse check for physical handlers.
2212 */
2213 /** @todo */
2214
2215 return State.cErrors;
2216}
2217
2218#endif /* VBOX_STRICT */
2219