VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@99328

Last change on this file since 99328 was 99317, checked in by vboxsync, 23 months ago

VMM/PGM: Nested VMX: bugref:10318 Added PGMHandlerPhysicalRegisterVmxApicAccessPage which holds the PGM lock to fix registration of the same VMX APIC-access page by multiple VCPUs. [build fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 84.2 KB
1/* $Id: PGMAllHandler.cpp 99317 2023-04-06 15:24:13Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/pgm.h>
36#include <VBox/vmm/iem.h>
37#include <VBox/vmm/iom.h>
38#include <VBox/vmm/mm.h>
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/nem.h>
41#include <VBox/vmm/stam.h>
42#include <VBox/vmm/dbgf.h>
43#ifdef IN_RING0
44# include <VBox/vmm/pdmdev.h>
45#endif
46#include "PGMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include "PGMInline.h"
49
50#include <VBox/log.h>
51#include <iprt/assert.h>
52#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
53# include <iprt/asm-amd64-x86.h>
54#endif
55#include <iprt/string.h>
56#include <VBox/param.h>
57#include <VBox/err.h>
58#include <VBox/vmm/selm.h>
59
60
61/*********************************************************************************************************************************
62* Global Variables *
63*********************************************************************************************************************************/
64/** Dummy physical access handler type record. */
65CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
66{
67 /* .hType = */ UINT64_C(0x93b7557e1937aaff),
68 /* .enmKind = */ PGMPHYSHANDLERKIND_INVALID,
69 /* .uState = */ PGM_PAGE_HNDL_PHYS_STATE_ALL,
70 /* .fKeepPgmLock = */ true,
71 /* .fRing0DevInsIdx = */ false,
72#ifdef IN_RING0
73 /* .fNotInHm = */ false,
74 /* .pfnHandler = */ pgmR0HandlerPhysicalHandlerToRing3,
75 /* .pfnPfHandler = */ pgmR0HandlerPhysicalPfHandlerToRing3,
76#elif defined(IN_RING3)
77 /* .fRing0Enabled = */ false,
78 /* .fNotInHm = */ false,
79 /* .pfnHandler = */ pgmR3HandlerPhysicalHandlerInvalid,
80#else
81# error "unsupported context"
82#endif
83 /* .pszDesc = */ "dummy"
84};
85
86
87/*********************************************************************************************************************************
88* Internal Functions *
89*********************************************************************************************************************************/
90static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
91 void *pvBitmap, uint32_t offBitmap);
92static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
93static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);
94
95
96#ifndef IN_RING3
97
98/**
99 * @callback_method_impl{FNPGMPHYSHANDLER,
100 * Dummy for forcing ring-3 handling of the access.}
101 */
102DECLCALLBACK(VBOXSTRICTRC)
103pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
104 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
105{
106 RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
107 return VINF_EM_RAW_EMULATE_INSTR;
108}
109
110
111/**
112 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
113 * Dummy for forcing ring-3 handling of the access.}
114 */
115DECLCALLBACK(VBOXSTRICTRC)
116pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
117 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
118{
119 RT_NOREF(pVM, pVCpu, uErrorCode, pCtx, pvFault, GCPhysFault, uUser);
120 return VINF_EM_RAW_EMULATE_INSTR;
121}
122
123#endif /* !IN_RING3 */
124
125
126/**
127 * Worker for pgmHandlerPhysicalExCreate.
128 *
129 * @returns A new physical handler on success or NULL on failure.
130 * @param pVM The cross context VM structure.
131 * @param pType The physical handler type.
132 * @param hType The physical handler type registration handle.
133 * @param uUser User argument to the handlers (not pointer).
134 * @param pszDesc Description of this handler. If NULL, the type description
135 * will be used instead.
136 */
137DECL_FORCE_INLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalExCreateWorker(PVMCC pVM, PCPGMPHYSHANDLERTYPEINT pType,
138 PGMPHYSHANDLERTYPE hType, uint64_t uUser,
139 R3PTRTYPE(const char *) pszDesc)
140{
141 PGM_LOCK_ASSERT_OWNER(pVM);
142 PPGMPHYSHANDLER pNew = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.allocateNode();
143 if (pNew)
144 {
145 pNew->Key = NIL_RTGCPHYS;
146 pNew->KeyLast = NIL_RTGCPHYS;
147 pNew->cPages = 0;
148 pNew->cAliasedPages = 0;
149 pNew->cTmpOffPages = 0;
150 pNew->uUser = uUser;
151 pNew->hType = hType;
152 pNew->pszDesc = pszDesc != NIL_RTR3PTR ? pszDesc
153#ifdef IN_RING3
154 : pType->pszDesc;
155#else
156 : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
157 NOREF(pType);
158#endif
159 }
160 return pNew;
161}
162
163
164/**
165 * Creates a physical access handler, allocation part.
166 *
167 * @returns VBox status code.
168 * @retval VERR_OUT_OF_RESOURCES if no more handlers available.
169 *
170 * @param pVM The cross context VM structure.
171 * @param hType The handler type registration handle.
172 * @param uUser User argument to the handlers (not pointer).
173 * @param pszDesc Description of this handler. If NULL, the type
174 * description will be used instead.
175 * @param ppPhysHandler Where to return the access handler structure on
176 * success.
177 */
178int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
179 R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
180{
181 /*
182 * Validate input.
183 */
184 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
185 AssertReturn(pType, VERR_INVALID_HANDLE);
186 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
187 AssertPtr(ppPhysHandler);
188
189 Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
190 uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));
191
192 /*
193 * Allocate and initialize the new entry.
194 */
195 int rc = PGM_LOCK(pVM);
196 AssertRCReturn(rc, rc);
197 *ppPhysHandler = pgmHandlerPhysicalExCreateWorker(pVM, pType, hType, uUser, pszDesc);
198 PGM_UNLOCK(pVM);
199 if (*ppPhysHandler)
200 return VINF_SUCCESS;
201 return VERR_OUT_OF_RESOURCES;
202}
203
204
205/**
206 * Duplicates a physical access handler.
207 *
208 * @returns VBox status code.
209 * @retval VINF_SUCCESS when successfully installed.
210 *
211 * @param pVM The cross context VM structure.
212 * @param pPhysHandlerSrc The source handler to duplicate.
213 * @param ppPhysHandler Where to return the access handler structure on
214 * success.
215 */
216int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
217{
218 return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
219 pPhysHandlerSrc->pszDesc, ppPhysHandler);
220}
221
222
223/**
224 * Register an access handler for a physical range.
225 *
226 * @returns VBox status code.
227 * @retval VINF_SUCCESS when successfully installed.
228 * @retval VINF_PGM_GCPHYS_ALIASED may be returned.
229 *
230 * @param pVM The cross context VM structure.
231 * @param pPhysHandler The physical handler.
232 * @param GCPhys Start physical address.
233 * @param GCPhysLast Last physical address. (inclusive)
234 */
235int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
236{
237 /*
238 * Validate input.
239 */
240 AssertReturn(pPhysHandler, VERR_INVALID_POINTER);
241 PGMPHYSHANDLERTYPE const hType = pPhysHandler->hType;
242 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
243 AssertReturn(pType, VERR_INVALID_HANDLE);
244 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
245
246 AssertPtr(pPhysHandler);
247
248 Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
249 hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
250 AssertReturn(pPhysHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
251
252 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
253 Assert(GCPhysLast - GCPhys < _4G); /* ASSUMPTION in PGMAllPhys.cpp */
254
255 switch (pType->enmKind)
256 {
257 case PGMPHYSHANDLERKIND_WRITE:
258 if (!pType->fNotInHm)
259 break;
260 RT_FALL_THRU(); /* Simplification: fNotInHm can only be used with full pages */
261 case PGMPHYSHANDLERKIND_MMIO:
262 case PGMPHYSHANDLERKIND_ALL:
263 /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
264 AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
265 AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
266 break;
267 default:
268 AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
269 return VERR_INVALID_PARAMETER;
270 }
271
272 /*
273 * We require the range to be within registered ram.
274 * There is no apparent need to support ranges which cover more than one ram range.
275 */
276 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
277 if ( !pRam
278 || GCPhysLast > pRam->GCPhysLast)
279 {
280#ifdef IN_RING3
281 DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
282#endif
283 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
284 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
285 }
286 Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
287 Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);
288
289 /*
290 * Try insert into list.
291 */
292 pPhysHandler->Key = GCPhys;
293 pPhysHandler->KeyLast = GCPhysLast;
294 pPhysHandler->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
295
296 int rc = PGM_LOCK(pVM);
297 if (RT_SUCCESS(rc))
298 {
299 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
300 if (RT_SUCCESS(rc))
301 {
302 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
303 if (rc == VINF_PGM_SYNC_CR3)
304 rc = VINF_PGM_GCPHYS_ALIASED;
305
306#if defined(IN_RING3) || defined(IN_RING0)
307 NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
308#endif
309 PGM_UNLOCK(pVM);
310
311 if (rc != VINF_SUCCESS)
312 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
313 return rc;
314 }
315 PGM_UNLOCK(pVM);
316 }
317
318 pPhysHandler->Key = NIL_RTGCPHYS;
319 pPhysHandler->KeyLast = NIL_RTGCPHYS;
320
321 AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);
322
323#if defined(IN_RING3) && defined(VBOX_STRICT)
324 DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
325#endif
326 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
327 GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
328 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
329}
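/*
 * Note for callers: GCPhysLast is inclusive, and for the MMIO/ALL kinds the
 * range must be fully page aligned (see the asserts above).  A minimal sketch
 * covering cb bytes from GCPhys, assuming cb is a multiple of GUEST_PAGE_SIZE
 * and error handling trimmed:
 *
 *     RTGCPHYS const GCPhysLast = GCPhys + cb - 1;
 *     rc = pgmHandlerPhysicalExRegister(pVM, pPhysHandler, GCPhys, GCPhysLast);
 */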
330
331
332/**
333 * Worker for pgmHandlerPhysicalRegisterVmxApicAccessPage.
334 *
335 * @returns VBox status code.
336 * @retval VINF_SUCCESS when successfully installed.
337 * @retval VINF_PGM_GCPHYS_ALIASED may be returned.
338 *
339 * @param pVM The cross context VM structure.
340 * @param pPhysHandler The physical handler.
341 * @param GCPhys The address of the virtual VMX APIC-access page.
342 */
343static int pgmHandlerPhysicalRegisterVmxApicAccessPage(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys)
344{
345 PGM_LOCK_ASSERT_OWNER(pVM);
346 LogFunc(("GCPhys=%RGp\n", GCPhys));
347
348 /*
349 * We require the range to be within registered ram.
350 * There is no apparent need to support ranges which cover more than one ram range.
351 */
352 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
353 RTGCPHYS const GCPhysLast = GCPhys | X86_PAGE_4K_OFFSET_MASK;
354 if ( !pRam
355 || GCPhysLast > pRam->GCPhysLast)
356 {
357#ifdef IN_RING3
358 DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
359#endif
360 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
361 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
362 }
363 Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
364 Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);
365
366 /*
367 * Try insert into list.
368 */
369 pPhysHandler->Key = GCPhys;
370 pPhysHandler->KeyLast = GCPhysLast;
371 pPhysHandler->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
372
373 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
374 if (RT_SUCCESS(rc))
375 {
376 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
377 if (rc == VINF_PGM_SYNC_CR3)
378 rc = VINF_PGM_GCPHYS_ALIASED;
379
380#if defined(IN_RING3) || defined(IN_RING0)
381 NEMHCNotifyHandlerPhysicalRegister(pVM, PGMPHYSHANDLERKIND_ALL, GCPhys, GCPhysLast - GCPhys + 1);
382#endif
383 return rc;
384 }
385
386 pPhysHandler->Key = NIL_RTGCPHYS;
387 pPhysHandler->KeyLast = NIL_RTGCPHYS;
388
389 AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);
390#if defined(IN_RING3) && defined(VBOX_STRICT)
391 DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
392#endif
393 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
394 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
395}
396
397
398/**
399 * Register an access handler for a physical range.
400 *
401 * @returns VBox status code.
402 * @retval VINF_SUCCESS when successfully installed.
403 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
404 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
405 * flagged together with a pool clearing.
406 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
407 * one. A debug assertion is raised.
408 *
409 * @param pVM The cross context VM structure.
410 * @param GCPhys Start physical address.
411 * @param GCPhysLast Last physical address. (inclusive)
412 * @param hType The handler type registration handle.
413 * @param uUser User argument to the handler.
414 * @param pszDesc Description of this handler. If NULL, the type
415 * description will be used instead.
416 */
417VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
418 uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
419{
420#ifdef LOG_ENABLED
421 PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
422 Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
423 GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
424#endif
425
426 PPGMPHYSHANDLER pNew;
427 int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
428 if (RT_SUCCESS(rc))
429 {
430 rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
431 if (RT_SUCCESS(rc))
432 return rc;
433 pgmHandlerPhysicalExDestroy(pVM, pNew);
434 }
435 return rc;
436}
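/*
 * Usage sketch (hypothetical type handle and user argument; a real caller
 * would have registered hType beforehand, e.g. via
 * PGMR3HandlerPhysicalTypeRegister or similar):
 *
 *     rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhys + GUEST_PAGE_SIZE - 1,
 *                                     hType, uUser, "example handler");
 *     AssertRCReturn(rc, rc);
 */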
437
438
439/**
440 * Register an access handler for a virtual VMX APIC-access page.
441 *
442 * This holds the PGM lock across the whole operation to resolve races between
443 * VCPUs registering the same page simultaneously. It's also a slightly slimmer
444 * version of the regular registration function as it's specific to the VMX
445 * APIC-access page.
446 *
447 * @returns VBox status code.
448 * @retval VINF_SUCCESS when successfully installed.
449 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
450 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
451 * flagged together with a pool clearing.
452 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
453 * one. A debug assertion is raised.
454 *
455 * @param pVM The cross context VM structure.
456 * @param GCPhys Start physical address.
457 * @param hType The handler type registration handle.
458 */
459VMMDECL(int) PGMHandlerPhysicalRegisterVmxApicAccessPage(PVMCC pVM, RTGCPHYS GCPhys, PGMPHYSHANDLERTYPE hType)
460{
461 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
462 AssertReturn(pType, VERR_INVALID_HANDLE);
463 AssertReturn(pType->enmKind == PGMPHYSHANDLERKIND_ALL, VERR_INVALID_HANDLE);
464 AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
465
466 /*
467 * Check whether the VMX APIC-access page has already been registered at this address.
468 */
469 int rc = PGM_LOCK_VOID(pVM);
470 AssertRCReturn(rc, rc);
471
472 PPGMPHYSHANDLER pHandler;
473 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pHandler);
474 if (RT_SUCCESS(rc))
475 {
476 PCPGMPHYSHANDLERTYPEINT const pHandlerType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pHandler);
477 Assert(GCPhys >= pHandler->Key && GCPhys <= pHandler->KeyLast);
478 Assert( pHandlerType->enmKind == PGMPHYSHANDLERKIND_WRITE
479 || pHandlerType->enmKind == PGMPHYSHANDLERKIND_ALL
480 || pHandlerType->enmKind == PGMPHYSHANDLERKIND_MMIO);
481
482 /* Check it's the virtual VMX APIC-access page. */
483 if ( pHandlerType->fNotInHm
484 && pHandlerType->enmKind == PGMPHYSHANDLERKIND_ALL)
485 rc = VINF_SUCCESS;
486 else
487 {
488 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
489 AssertMsgFailed(("Conflict! GCPhys=%RGp enmKind=%#x fNotInHm=%RTbool\n", GCPhys, pHandlerType->enmKind,
490 pHandlerType->fNotInHm));
491 }
492
493 PGM_UNLOCK(pVM);
494 return rc;
495 }
496
497 /*
498 * Create and register a physical handler for the virtual VMX APIC-access page.
499 */
500 pHandler = pgmHandlerPhysicalExCreateWorker(pVM, pType, hType, 0 /*uUser*/, NULL /*pszDesc*/);
501 if (pHandler)
502 {
503 rc = pgmHandlerPhysicalRegisterVmxApicAccessPage(pVM, pHandler, GCPhys);
504 if (RT_SUCCESS(rc))
505 { /* likely */ }
506 else
507 pgmHandlerPhysicalExDestroy(pVM, pHandler);
508 }
509 else
510 rc = VERR_OUT_OF_RESOURCES;
511
512 PGM_UNLOCK(pVM);
513 return rc;
514}
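/*
 * Concurrency sketch: several VCPUs performing nested VMX setup may call this
 * for the same GCPhys.  The first call registers the page; later calls find
 * the existing registration and return VINF_SUCCESS (handle name is
 * hypothetical):
 *
 *     rc = PGMHandlerPhysicalRegisterVmxApicAccessPage(pVM, GCPhysApicAccess,
 *                                                      hApicAccessType);
 */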
515
516
517/**
518 * Sets ram range flags and attempts updating shadow PTs.
519 *
520 * @returns VBox status code.
521 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
522 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs could not be updated because
523 * the guest page is aliased and/or mapped by multiple PTs. FFs set.
524 * @param pVM The cross context VM structure.
525 * @param pCur The physical handler.
526 * @param pRam The RAM range.
527 * @param pvBitmap Dirty bitmap. Optional.
528 * @param offBitmap Dirty bitmap offset.
529 */
530static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
531 void *pvBitmap, uint32_t offBitmap)
532{
533 /*
534 * Iterate the guest ram pages updating the flags and flushing PT entries
535 * mapping the page.
536 */
537 bool fFlushTLBs = false;
538 int rc = VINF_SUCCESS;
539 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
540 const unsigned uState = pCurType->uState;
541 uint32_t cPages = pCur->cPages;
542 uint32_t i = (pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
543 for (;;)
544 {
545 PPGMPAGE pPage = &pRam->aPages[i];
546 AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
547 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));
548
549 /* Only do upgrades. */
550 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
551 {
552 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState, pCurType->fNotInHm);
553
554 const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
555 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
556 false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
557 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
558 rc = rc2;
559
560#ifdef VBOX_WITH_NATIVE_NEM
561 /* Tell NEM about the protection update. */
562 if (VM_IS_NEM_ENABLED(pVM))
563 {
564 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
565 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
566 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
567 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
568 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
569 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
570 }
571#endif
572 if (pvBitmap)
573 ASMBitSet(pvBitmap, offBitmap);
574 }
575
576 /* next */
577 if (--cPages == 0)
578 break;
579 i++;
580 offBitmap++;
581 }
582
583 if (fFlushTLBs)
584 {
585 PGM_INVL_ALL_VCPU_TLBS(pVM);
586 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
587 }
588 else
589 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));
590
591 return rc;
592}
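/*
 * Dirty-bitmap sketch: when pvBitmap is given, one bit per page is set
 * (starting at offBitmap) for each page whose handler state was upgraded.
 * A caller could scan the result roughly like this (illustrative loop):
 *
 *     for (uint32_t iPage = 0; iPage < pCur->cPages; iPage++)
 *         if (ASMBitTest(pvBitmap, offBitmap + iPage))
 *             ... page iPage was re-armed, i.e. touched since the last reset ...
 */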
593
594
595/**
596 * Deregister a physical page access handler.
597 *
598 * @returns VBox status code.
599 * @param pVM The cross context VM structure.
600 * @param pPhysHandler The handler to deregister (but not free).
601 */
602int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
603{
604 LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
605 pPhysHandler->Key, pPhysHandler->KeyLast, R3STRING(pPhysHandler->pszDesc)));
606
607 int rc = PGM_LOCK(pVM);
608 AssertRCReturn(rc, rc);
609
610 RTGCPHYS const GCPhys = pPhysHandler->Key;
611 AssertReturnStmt(GCPhys != NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_PGM_HANDLER_NOT_FOUND);
612
613 /*
614 * Remove the handler from the tree.
615 */
616
617 PPGMPHYSHANDLER pRemoved;
618 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
619 if (RT_SUCCESS(rc))
620 {
621 if (pRemoved == pPhysHandler)
622 {
623 /*
624 * Clear the page bits, notify the REM about this change and clear
625 * the cache.
626 */
627 pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
628 if (VM_IS_NEM_ENABLED(pVM))
629 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
630 pVM->pgm.s.idxLastPhysHandler = 0;
631
632 pPhysHandler->Key = NIL_RTGCPHYS;
633 pPhysHandler->KeyLast = NIL_RTGCPHYS;
634
635 PGM_UNLOCK(pVM);
636
637 return VINF_SUCCESS;
638 }
639
640 /*
641 * Both of the failure conditions here are considered internal processing
642 * errors because they can only be caused by race conditions or corruption.
643 * If we ever need to handle concurrent deregistration, we have to move
644 * the NIL_RTGCPHYS check inside the PGM lock.
645 */
646 pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pRemoved);
647 }
648
649 PGM_UNLOCK(pVM);
650
651 if (RT_FAILURE(rc))
652 AssertMsgFailed(("Didn't find range starting at %RGp in the tree! %Rrc=rc\n", GCPhys, rc));
653 else
654 AssertMsgFailed(("Found different handle at %RGp in the tree: got %p insteaded of %p\n",
655 GCPhys, pRemoved, pPhysHandler));
656 return VERR_PGM_HANDLER_IPE_1;
657}
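/*
 * Teardown sketch: a handler created with pgmHandlerPhysicalExCreate and
 * installed with pgmHandlerPhysicalExRegister is taken down in two steps,
 * since pgmHandlerPhysicalExDestroy insists on a deregistered handler:
 *
 *     rc = pgmHandlerPhysicalExDeregister(pVM, pHandler);
 *     if (RT_SUCCESS(rc))
 *         rc = pgmHandlerPhysicalExDestroy(pVM, pHandler);
 */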
658
659
660/**
661 * Destroys (frees) a physical handler.
662 *
663 * The caller must deregister it before destroying it!
664 *
665 * @returns VBox status code.
666 * @param pVM The cross context VM structure.
667 * @param pHandler The handler to free. NULL if ignored.
668 */
669int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
670{
671 if (pHandler)
672 {
673 AssertPtr(pHandler);
674 AssertReturn(pHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
675
676 int rc = PGM_LOCK(pVM);
677 if (RT_SUCCESS(rc))
678 {
679 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pHandler);
680 PGM_UNLOCK(pVM);
681 }
682 return rc;
683 }
684 return VINF_SUCCESS;
685}
686
687
688/**
689 * Deregister a physical page access handler.
690 *
691 * @returns VBox status code.
692 * @param pVM The cross context VM structure.
693 * @param GCPhys Start physical address.
694 */
695VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
696{
697 AssertReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
698
699 /*
700 * Find the handler.
701 */
702 int rc = PGM_LOCK(pVM);
703 AssertRCReturn(rc, rc);
704
705 PPGMPHYSHANDLER pRemoved;
706 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
707 if (RT_SUCCESS(rc))
708 {
709 Assert(pRemoved->Key == GCPhys);
710 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
711 pRemoved->Key, pRemoved->KeyLast, R3STRING(pRemoved->pszDesc)));
712
713 /*
714 * Clear the page bits, notify the REM about this change and clear
715 * the cache.
716 */
717 pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
718 if (VM_IS_NEM_ENABLED(pVM))
719 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
720 pVM->pgm.s.idxLastPhysHandler = 0;
721
722 pRemoved->Key = NIL_RTGCPHYS;
723 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pRemoved);
724
725 PGM_UNLOCK(pVM);
726 return rc;
727 }
728
729 PGM_UNLOCK(pVM);
730
731 if (rc == VERR_NOT_FOUND)
732 {
733 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
734 rc = VERR_PGM_HANDLER_NOT_FOUND;
735 }
736 return rc;
737}
738
739
740/**
741 * Shared code with modify.
742 */
743static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
744{
745#ifdef VBOX_WITH_NATIVE_NEM
746 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
747 RTGCPHYS GCPhysStart = pCur->Key;
748 RTGCPHYS GCPhysLast = pCur->KeyLast;
749
750 /*
751 * Page align the range.
752 *
753 * Since we've reset (recalculated) the physical handler state of all pages
754 * we can make use of the page states to figure out whether a page should be
755 * included in the REM notification or not.
756 */
757 if ( (pCur->Key & GUEST_PAGE_OFFSET_MASK)
758 || ((pCur->KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
759 {
760 Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);
761
762 if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
763 {
764 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
765 if ( pPage
766 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
767 {
768 RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
769 if ( GCPhys > GCPhysLast
770 || GCPhys < GCPhysStart)
771 return;
772 GCPhysStart = GCPhys;
773 }
774 else
775 GCPhysStart &= X86_PTE_PAE_PG_MASK;
776 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
777 }
778
779 if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
780 {
781 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
782 if ( pPage
783 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
784 {
785 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
786 if ( GCPhys < GCPhysStart
787 || GCPhys > GCPhysLast)
788 return;
789 GCPhysLast = GCPhys;
790 }
791 else
792 GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
793 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
794 }
795 }
796
797 /*
798 * Tell NEM.
799 */
800 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhysStart);
801 RTGCPHYS const cb = GCPhysLast - GCPhysStart + 1;
802 uint8_t u2State = UINT8_MAX;
803 NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
804 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
805 if (u2State != UINT8_MAX && pRam)
806 pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
807 cb >> GUEST_PAGE_SHIFT, u2State);
808#else
809 RT_NOREF(pVM, pCur);
810#endif
811}
812
813
814/**
815 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
816 * edge pages.
817 */
818DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
819{
820 /*
821 * Look for other handlers.
822 */
823 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
824 for (;;)
825 {
826 PPGMPHYSHANDLER pCur;
827 int rc;
828 if (fAbove)
829 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
830 GCPhys, &pCur);
831 else
832 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrBelow(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
833 GCPhys, &pCur);
834 if (rc == VERR_NOT_FOUND)
835 break;
836 AssertRCBreak(rc);
837 if (((fAbove ? pCur->Key : pCur->KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
838 break;
839 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
840 uState = RT_MAX(uState, pCurType->uState);
841
842 /* next? */
843 RTGCPHYS GCPhysNext = fAbove
844 ? pCur->KeyLast + 1
845 : pCur->Key - 1;
846 if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
847 break;
848 GCPhys = GCPhysNext;
849 }
850
851 /*
852 * Update if we found something that is a higher priority state than the current.
853 * Note! The PGMPHYSHANDLER_F_NOT_IN_HM can be ignored here as it requires whole pages.
854 */
855 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
856 {
857 PPGMPAGE pPage;
858 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
859 if ( RT_SUCCESS(rc)
860 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
861 {
862 /* This should normally not be necessary. */
863 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, uState);
864 bool fFlushTLBs;
865 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
866 if (RT_SUCCESS(rc) && fFlushTLBs)
867 PGM_INVL_ALL_VCPU_TLBS(pVM);
868 else
869 AssertRC(rc);
870
871#ifdef VBOX_WITH_NATIVE_NEM
872 /* Tell NEM about the protection update. */
873 if (VM_IS_NEM_ENABLED(pVM))
874 {
875 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
876 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
877 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
878 PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
879 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
880 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
881 }
882#endif
883 }
884 else
885 AssertRC(rc);
886 }
887}
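/*
 * Example of why the edge-page recalculation above matters: say a WRITE
 * handler covers 0x1000-0x17ff and an ALL handler covers 0x1800-0x1fff,
 * sharing one 4 KiB guest page.  Deregistering the WRITE handler resets the
 * page to NONE, and the neighbour scan then restores it to the ALL state
 * instead of leaving the remaining handler unprotected.  (Illustrative
 * addresses; any page-straddling pair behaves the same.)
 */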
888
889
890/**
891 * Resets an aliased page.
892 *
893 * @param pVM The cross context VM structure.
894 * @param pPage The page.
895 * @param GCPhysPage The page address in case it comes in handy.
896 * @param pRam The RAM range the page is associated with (for NEM
897 * notifications).
898 * @param fDoAccounting Whether to perform accounting. (Only set during
899 * reset where pgmR3PhysRamReset doesn't have the
900 * handler structure handy.)
901 * @param fFlushIemTlbs Whether to perform IEM TLB flushing or not. This
902 * can be cleared only if the caller does the flushing
903 * after calling this function.
904 */
905void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam,
906 bool fDoAccounting, bool fFlushIemTlbs)
907{
908 Assert( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
909 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
910 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
911#ifdef VBOX_WITH_NATIVE_NEM
912 RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
913#endif
914
915 /*
916 * Flush any shadow page table references *first*.
917 */
918 bool fFlushTLBs = false;
919 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
920 AssertLogRelRCReturnVoid(rc);
921#if defined(VBOX_VMM_TARGET_ARMV8)
922 AssertReleaseFailed();
923#else
924 HMFlushTlbOnAllVCpus(pVM);
925#endif
926
927 /*
928 * Make it an MMIO/Zero page.
929 */
930 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
931 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
932 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
933 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
934 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
935
936 /*
937 * Flush its TLB entry.
938 */
939 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
940 if (fFlushIemTlbs)
941 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
942
943 /*
944 * Do accounting for pgmR3PhysRamReset.
945 */
946 if (fDoAccounting)
947 {
948 PPGMPHYSHANDLER pHandler;
949 rc = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pHandler);
950 if (RT_SUCCESS(rc))
951 {
952 Assert(pHandler->cAliasedPages > 0);
953 pHandler->cAliasedPages--;
954 }
955 else
956 AssertMsgFailed(("rc=%Rrc GCPhysPage=%RGp\n", rc, GCPhysPage));
957 }
958
959#ifdef VBOX_WITH_NATIVE_NEM
960 /*
961 * Tell NEM about the protection change.
962 */
963 if (VM_IS_NEM_ENABLED(pVM))
964 {
965 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
966 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
967 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
968 NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
969 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
970 }
971#else
972 RT_NOREF(pRam);
973#endif
974}
975
976
977/**
978 * Resets ram range flags.
979 *
980 * @returns VBox status code.
981 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
982 * @param pVM The cross context VM structure.
983 * @param pCur The physical handler.
984 *
985 * @remark We don't start messing with the shadow page tables, as we've
986 * already got code in Trap0e which deals with out of sync handler
987 * flags (originally conceived for global pages).
988 */
989static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
990{
991 /*
992 * Iterate the guest ram pages updating the state.
993 */
994 RTUINT cPages = pCur->cPages;
995 RTGCPHYS GCPhys = pCur->Key;
996 PPGMRAMRANGE pRamHint = NULL;
997 for (;;)
998 {
999 PPGMPAGE pPage;
1000 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
1001 if (RT_SUCCESS(rc))
1002 {
1003 /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
1004 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
1005 bool fNemNotifiedAlready = false;
1006 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
1007 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
1008 {
1009 Assert(pCur->cAliasedPages > 0);
1010 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/, true /*fFlushIemTlbs*/);
1011 pCur->cAliasedPages--;
1012 fNemNotifiedAlready = true;
1013 }
1014#ifdef VBOX_STRICT
1015 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1016 AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
1017 ("%RGp %R[pgmpage]\n", GCPhys, pPage));
1018#endif
1019 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE, false);
1020
1021#ifdef VBOX_WITH_NATIVE_NEM
1022 /* Tell NEM about the protection change. */
1023 if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
1024 {
1025 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1026 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1027 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1028 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
1029 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1030 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1031 }
1032#endif
1033 RT_NOREF(fNemNotifiedAlready);
1034 }
1035 else
1036 AssertRC(rc);
1037
1038 /* next */
1039 if (--cPages == 0)
1040 break;
1041 GCPhys += GUEST_PAGE_SIZE;
1042 }
1043
1044 pCur->cAliasedPages = 0;
1045 pCur->cTmpOffPages = 0;
1046
1047 /*
1048 * Check for partial start and end pages.
1049 */
1050 if (pCur->Key & GUEST_PAGE_OFFSET_MASK)
1051 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Key - 1, false /* fAbove */, &pRamHint);
1052 if ((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
1053 pgmHandlerPhysicalRecalcPageState(pVM, pCur->KeyLast + 1, true /* fAbove */, &pRamHint);
1054}
1055
1056
1057#if 0 /* unused */
1058/**
1059 * Modify a physical page access handler.
1060 *
1061 * Modification can only be done to the range itself, not the type or anything else.
1062 *
1063 * @returns VBox status code.
1064 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
1065 * and a new registration must be performed!
1066 * @param pVM The cross context VM structure.
1067 * @param GCPhysCurrent Current location.
1068 * @param GCPhys New location.
1069 * @param GCPhysLast New last location.
1070 */
1071VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
1072{
1073 /*
1074 * Remove it.
1075 */
1076 int rc;
1077 PGM_LOCK_VOID(pVM);
1078 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
1079 if (pCur)
1080 {
1081 /*
1082 * Clear the ram flags. (We're gonna move or free it!)
1083 */
1084 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
1085 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1086 /** @todo pCurType validation. */
1087 bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
1088 && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;
1089
1090 /*
1091 * Validate the new range, modify and reinsert.
1092 */
1093 if (GCPhysLast >= GCPhys)
1094 {
1095 /*
1096 * We require the range to be within registered ram.
1097 * There is no apparent need to support ranges which cover more than one ram range.
1098 */
1099 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1100 if ( pRam
1101 && GCPhys <= pRam->GCPhysLast
1102 && GCPhysLast >= pRam->GCPhys)
1103 {
1104 pCur->Core.Key = GCPhys;
1105 pCur->Core.KeyLast = GCPhysLast;
1106 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;
1107
1108 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
1109 {
1110 RTGCPHYS const cb = GCPhysLast - GCPhys + 1;
1111 PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;
1112
1113 /*
1114 * Set ram flags, flush shadow PT entries and finally tell REM about this.
1115 */
1116 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);
1117
1118 /** @todo NEM: not sure we need this notification... */
1119 NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);
1120
1121 PGM_UNLOCK(pVM);
1122
1123 PGM_INVL_ALL_VCPU_TLBS(pVM);
1124 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
1125 GCPhysCurrent, GCPhys, GCPhysLast));
1126 return VINF_SUCCESS;
1127 }
1128
1129 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
1130 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
1131 }
1132 else
1133 {
1134 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
1135 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
1136 }
1137 }
1138 else
1139 {
1140 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
1141 rc = VERR_INVALID_PARAMETER;
1142 }
1143
1144 /*
1145 * Invalid new location, flush the cache and free it.
1146 * We've only gotta notify REM and free the memory.
1147 */
1148 if (VM_IS_NEM_ENABLED(pVM))
1149 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
1150 pVM->pgm.s.pLastPhysHandlerR0 = 0;
1151 pVM->pgm.s.pLastPhysHandlerR3 = 0;
1152 PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
1153 MMHyperFree(pVM, pCur);
1154 }
1155 else
1156 {
1157 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
1158 rc = VERR_PGM_HANDLER_NOT_FOUND;
1159 }
1160
1161 PGM_UNLOCK(pVM);
1162 return rc;
1163}
1164#endif /* unused */
1165
1166
1167/**
1168 * Changes the user callback arguments associated with a physical access handler.
1169 *
1170 * @returns VBox status code.
1171 * @param pVM The cross context VM structure.
1172 * @param GCPhys Start physical address of the handler.
1173 * @param uUser User argument to the handlers.
1174 */
1175VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
1176{
1177 /*
1178 * Find the handler and make the change.
1179 */
1180 int rc = PGM_LOCK(pVM);
1181 AssertRCReturn(rc, rc);
1182
1183 PPGMPHYSHANDLER pCur;
1184 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1185 if (RT_SUCCESS(rc))
1186 {
1187 Assert(pCur->Key == GCPhys);
1188 pCur->uUser = uUser;
1189 }
1190 else if (rc == VERR_NOT_FOUND)
1191 {
1192 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1193 rc = VERR_PGM_HANDLER_NOT_FOUND;
1194 }
1195
1196 PGM_UNLOCK(pVM);
1197 return rc;
1198}
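/*
 * Usage sketch (hypothetical values): retarget the opaque callback argument
 * without tearing down and re-registering the handler, e.g. after relocating
 * the device state it points at:
 *
 *     rc = PGMHandlerPhysicalChangeUserArg(pVM, GCPhysHandler, uNewUser);
 */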
1199
1200#if 0 /* unused */
1201
1202/**
1203 * Splits a physical access handler in two.
1204 *
1205 * @returns VBox status code.
1206 * @param pVM The cross context VM structure.
1207 * @param GCPhys Start physical address of the handler.
1208 * @param GCPhysSplit The split address.
1209 */
1210VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
1211{
1212 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
1213
1214 /*
1215 * Do the allocation without owning the lock.
1216 */
1217 PPGMPHYSHANDLER pNew;
1218 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
1219 if (RT_FAILURE(rc))
1220 return rc;
1221
1222 /*
1223 * Get the handler.
1224 */
1225 PGM_LOCK_VOID(pVM);
1226 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1227 if (RT_LIKELY(pCur))
1228 {
1229 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
1230 {
1231 /*
1232 * Create new handler node for the 2nd half.
1233 */
1234 *pNew = *pCur;
1235 pNew->Core.Key = GCPhysSplit;
1236 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1237
1238 pCur->Core.KeyLast = GCPhysSplit - 1;
1239 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1240
1241 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
1242 {
1243 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
1244 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
1245 PGM_UNLOCK(pVM);
1246 return VINF_SUCCESS;
1247 }
1248 AssertMsgFailed(("whu?\n"));
1249 rc = VERR_PGM_PHYS_HANDLER_IPE;
1250 }
1251 else
1252 {
1253 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
1254 rc = VERR_INVALID_PARAMETER;
1255 }
1256 }
1257 else
1258 {
1259 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1260 rc = VERR_PGM_HANDLER_NOT_FOUND;
1261 }
1262 PGM_UNLOCK(pVM);
1263 MMHyperFree(pVM, pNew);
1264 return rc;
1265}
1266
1267
1268/**
1269 * Joins up two adjacent physical access handlers which have the same callbacks.
1270 *
1271 * @returns VBox status code.
1272 * @param pVM The cross context VM structure.
1273 * @param GCPhys1 Start physical address of the first handler.
1274 * @param GCPhys2 Start physical address of the second handler.
1275 */
1276VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
1277{
1278 /*
1279 * Get the handlers.
1280 */
1281 int rc;
1282 PGM_LOCK_VOID(pVM);
1283 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
1284 if (RT_LIKELY(pCur1))
1285 {
1286 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1287 if (RT_LIKELY(pCur2))
1288 {
1289 /*
1290 * Make sure that they are adjacent, and that they've got the same callbacks.
1291 */
1292 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
1293 {
1294 if (RT_LIKELY(pCur1->hType == pCur2->hType))
1295 {
1296 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1297 if (RT_LIKELY(pCur3 == pCur2))
1298 {
1299 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
1300 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1301 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
1302 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1303 pVM->pgm.s.pLastPhysHandlerR0 = 0;
1304 pVM->pgm.s.pLastPhysHandlerR3 = 0;
1305 PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
1306 MMHyperFree(pVM, pCur2);
1307 PGM_UNLOCK(pVM);
1308 return VINF_SUCCESS;
1309 }
1310
1311 Assert(pCur3 == pCur2);
1312 rc = VERR_PGM_PHYS_HANDLER_IPE;
1313 }
1314 else
1315 {
1316 AssertMsgFailed(("mismatching handlers\n"));
1317 rc = VERR_ACCESS_DENIED;
1318 }
1319 }
1320 else
1321 {
1322 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
1323 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1324 rc = VERR_INVALID_PARAMETER;
1325 }
1326 }
1327 else
1328 {
1329 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
1330 rc = VERR_PGM_HANDLER_NOT_FOUND;
1331 }
1332 }
1333 else
1334 {
1335 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
1336 rc = VERR_PGM_HANDLER_NOT_FOUND;
1337 }
1338 PGM_UNLOCK(pVM);
1339 return rc;
1340
1341}
1342
1343#endif /* unused */
1344
1345/**
1346 * Resets any modifications to individual pages in a physical page access
1347 * handler region.
1348 *
1349 * This is used in pair with PGMHandlerPhysicalPageTempOff(),
1350 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
1351 *
1352 * @returns VBox status code.
1353 * @param pVM The cross context VM structure.
1354 * @param GCPhys The start address of the handler regions, i.e. what you
1355 * passed to PGMR3HandlerPhysicalRegister(),
1356 * PGMHandlerPhysicalRegisterEx() or
1357 * PGMHandlerPhysicalModify().
1358 */
1359VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
1360{
1361 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
1362 int rc = PGM_LOCK(pVM);
1363 AssertRCReturn(rc, rc);
1364
1365 /*
1366 * Find the handler.
1367 */
1368 PPGMPHYSHANDLER pCur;
1369 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1370 if (RT_SUCCESS(rc))
1371 {
1372 Assert(pCur->Key == GCPhys);
1373
1374 /*
1375 * Validate kind.
1376 */
1377 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1378 switch (pCurType->enmKind)
1379 {
1380 case PGMPHYSHANDLERKIND_WRITE:
1381 case PGMPHYSHANDLERKIND_ALL:
1382 case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
1383 {
1384 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
1385 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1386 Assert(pRam);
1387 Assert(pRam->GCPhys <= pCur->Key);
1388 Assert(pRam->GCPhysLast >= pCur->KeyLast);
1389
1390 if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
1391 {
1392 /*
1393 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
1394 * This could probably be optimized a bit wrt to flushing, but I'm too lazy
1395 * to do that now...
1396 */
1397 if (pCur->cAliasedPages)
1398 {
1399 PPGMPAGE pPage = &pRam->aPages[(pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
1400 RTGCPHYS GCPhysPage = pCur->Key;
1401 uint32_t cLeft = pCur->cPages;
1402 bool fFlushIemTlb = false;
1403 while (cLeft-- > 0)
1404 {
1405 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
1406 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
1407 {
1408 fFlushIemTlb |= PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO;
1409 Assert(pCur->cAliasedPages > 0);
1410 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
1411 false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
1412 --pCur->cAliasedPages;
1413#ifndef VBOX_STRICT
1414 if (pCur->cAliasedPages == 0)
1415 break;
1416#endif
1417 }
1418 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
1419 GCPhysPage += GUEST_PAGE_SIZE;
1420 pPage++;
1421 }
1422 Assert(pCur->cAliasedPages == 0);
1423
1424 /*
1425 * Flush IEM TLBs in case they contain any references to aliased pages.
1426 * This is only necessary for MMIO2 aliases.
1427 */
1428 if (fFlushIemTlb)
1429 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
1430 }
1431 }
1432 else if (pCur->cTmpOffPages > 0)
1433 {
1434 /*
1435 * Set the flags and flush shadow PT entries.
1436 */
1437 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
1438 }
1439
1440 pCur->cAliasedPages = 0;
1441 pCur->cTmpOffPages = 0;
1442
1443 rc = VINF_SUCCESS;
1444 break;
1445 }
1446
1447 /*
1448 * Invalid.
1449 */
1450 default:
1451 AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
1452 rc = VERR_PGM_PHYS_HANDLER_IPE;
1453 break;
1454 }
1455 }
1456 else if (rc == VERR_NOT_FOUND)
1457 {
1458 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1459 rc = VERR_PGM_HANDLER_NOT_FOUND;
1460 }
1461
1462 PGM_UNLOCK(pVM);
1463 return rc;
1464}
1465
1466
1467/**
1468 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
1469 * tracking.
1470 *
1471 * @returns VBox status code.
1472 * @param pVM The cross context VM structure.
1473 * @param GCPhys The start address of the handler region.
1474 * @param pvBitmap Dirty bitmap. Caller has cleared this already, only
1475 * dirty bits will be set. Caller also made sure it's big
1476 * enough.
1477 * @param offBitmap Dirty bitmap offset.
1478 * @remarks Caller must own the PGM critical section.
1479 */
1480DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
1481{
1482 LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
1483 PGM_LOCK_ASSERT_OWNER(pVM);
1484
1485 /*
1486 * Find the handler.
1487 */
1488 PPGMPHYSHANDLER pCur;
1489 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1490 if (RT_SUCCESS(rc))
1491 {
1492 Assert(pCur->Key == GCPhys);
1493
1494 /*
1495 * Validate kind.
1496 */
1497 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1498 if ( pCurType
1499 && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
1500 {
1501 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));
1502
1503 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1504 Assert(pRam);
1505 Assert(pRam->GCPhys <= pCur->Key);
1506 Assert(pRam->GCPhysLast >= pCur->KeyLast);
1507
1508 /*
1509 * Set the flags and flush shadow PT entries.
1510 */
1511 if (pCur->cTmpOffPages > 0)
1512 {
1513 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
1514 pCur->cTmpOffPages = 0;
1515 }
1516 else
1517 rc = VINF_SUCCESS;
1518 }
1519 else
1520 {
1521 AssertFailed();
1522 rc = VERR_WRONG_TYPE;
1523 }
1524 }
1525 else if (rc == VERR_NOT_FOUND)
1526 {
1527 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1528 rc = VERR_PGM_HANDLER_NOT_FOUND;
1529 }
1530
1531 return rc;
1532}
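/*
 * Sketch of the MMIO2 dirty-tracking cycle (the caller owns the PGM lock and
 * has zeroed the bitmap beforehand, as the function requires; pbBitmap is a
 * hypothetical name):
 *
 *     rc = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, GCPhys, pbBitmap, 0);
 *     // set bits now identify pages written since the previous reset
 */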
1533
1534
1535/**
1536 * Temporarily turns off the access monitoring of a page within a monitored
1537 * physical write/all page access handler region.
1538 *
1539 * Use this when no further \#PFs are required for that page. Be aware that
1540 * a page directory sync might reset the flags, and turn on access monitoring
1541 * for the page.
1542 *
1543 * The caller must do required page table modifications.
1544 *
1545 * @returns VBox status code.
1546 * @param pVM The cross context VM structure.
1547 * @param GCPhys The start address of the access handler. This
1548 * must be a fully page aligned range or we risk
1549 * messing up other handlers installed for the
1550 * start and end pages.
1551 * @param GCPhysPage The physical address of the page to turn off
1552 * access monitoring for.
1553 */
1554VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
1555{
1556 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
1557 int rc = PGM_LOCK(pVM);
1558 AssertRCReturn(rc, rc);
1559
1560 /*
1561 * Validate the range.
1562 */
1563 PPGMPHYSHANDLER pCur;
1564 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1565 if (RT_SUCCESS(rc))
1566 {
1567 Assert(pCur->Key == GCPhys);
1568 if (RT_LIKELY( GCPhysPage >= pCur->Key
1569 && GCPhysPage <= pCur->KeyLast))
1570 {
1571 Assert(!(pCur->Key & GUEST_PAGE_OFFSET_MASK));
1572 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
1573
1574 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1575 AssertReturnStmt( pCurType
1576 && ( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1577 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
1578 PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1579
1580 /*
1581 * Change the page status.
1582 */
1583 PPGMPAGE pPage;
1584 PPGMRAMRANGE pRam;
1585 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1586 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1587 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1588 {
1589 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1590 pCur->cTmpOffPages++;
1591
1592#ifdef VBOX_WITH_NATIVE_NEM
1593 /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
1594 if (VM_IS_NEM_ENABLED(pVM))
1595 {
1596 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1597 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1598 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
1599 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1600 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1601 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1602 }
1603#endif
1604 }
1605 PGM_UNLOCK(pVM);
1606 return VINF_SUCCESS;
1607 }
1608 PGM_UNLOCK(pVM);
1609 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1610 return VERR_INVALID_PARAMETER;
1611 }
1612 PGM_UNLOCK(pVM);
1613
1614 if (rc == VERR_NOT_FOUND)
1615 {
1616 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1617 return VERR_PGM_HANDLER_NOT_FOUND;
1618 }
1619 return rc;
1620}
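/*
 * Pairing sketch: temporarily disarm a page and later re-arm the whole range
 * with PGMHandlerPhysicalReset (VGA uses this pattern for dirty-page
 * tracking; addresses are illustrative):
 *
 *     rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysHandler, GCPhysPage);
 *     // ... guest writes hit the page without faulting ...
 *     rc = PGMHandlerPhysicalReset(pVM, GCPhysHandler);
 */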
1621
1622
1623/**
1624 * Resolves an MMIO2 page.
1625 *
1626 * Caller has taken the PGM lock.
1627 *
1628 * @returns Pointer to the page if valid, NULL otherwise
1629 * @param pVM The cross context VM structure.
1630 * @param pDevIns The device owning it.
1631 * @param hMmio2 The MMIO2 region.
1632 * @param offMmio2Page The offset into the region.
1633 */
1634static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
1635{
1636 /* Only works if the handle is in the handle table! */
1637 AssertReturn(hMmio2 != 0, NULL);
1638 hMmio2--;
1639
1640 /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
1641 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1642 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1643 AssertReturn(pCur, NULL);
1644 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
1645
1646 /* Loop thru the sub-ranges till we find the one covering offMmio2Page. */
1647 for (;;)
1648 {
1649#ifdef IN_RING3
1650 AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
1651#else
1652 AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
1653#endif
1654
1655 /* Does it match the offset? */
1656 if (offMmio2Page < pCur->cbReal)
1657 return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
1658
1659 /* Advance if we can. */
1660 AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
1661 offMmio2Page -= pCur->cbReal;
1662 hMmio2++;
1663 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1664 pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1665 AssertReturn(pCur, NULL);
1666 }
1667}
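
/*
 * Worked example (illustrative): for a region registered as three chunks
 * with cbReal = 0x10000, 0x10000 and 0x8000, an offMmio2Page of 0x14000
 * fails the first chunk's range test (0x14000 >= 0x10000), is reduced to
 * 0x4000, and resolves to aPages[0x4000 >> GUEST_PAGE_SHIFT] of the
 * second chunk.
 */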
1668
1669
1670/**
1671 * Replaces an MMIO page with an MMIO2 page.
1672 *
1673 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1674 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1675 * backing, the caller must provide a replacement page. For various reasons the
1676 * replacement page must be an MMIO2 page.
1677 *
1678 * The caller must do the required page table modifications. You can get away
1679 * without making any modifications since it's an MMIO page; the cost is an
1680 * extra \#PF which will then resync the page.
1681 *
1682 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1683 *
1684 * The caller may still get handler callbacks even after this call and must be
1685 * able to deal correctly with such calls. The reason for these callbacks is
1686 * either that we're executing in the recompiler (which doesn't know about this
1687 * arrangement) or that we've been restored from saved state (where we won't
1688 * save the change).
1689 *
1690 * @returns VBox status code.
1691 * @param pVM The cross context VM structure.
1692 * @param GCPhys The start address of the access handler. This
1693 * must be a fully page aligned range or we risk
1694 * messing up other handlers installed for the
1695 * start and end pages.
1696 * @param GCPhysPage The physical address of the page to turn off
1697 * access monitoring for and replace with the MMIO2
1698 * page.
1699 * @param pDevIns The device instance owning @a hMmio2.
1700 * @param hMmio2 Handle to the MMIO2 region containing the page
1701 * to remap in the MMIO page at @a GCPhysPage.
1702 * @param offMmio2PageRemap The offset into @a hMmio2 of the MMIO2 page that
1703 * should serve as backing memory.
1704 *
1705 * @remark May cause a page pool flush if used on a page that is already
1706 * aliased.
1707 *
1708 * @note This trick only works reliably if the two pages are never ever
1709 * mapped in the same page table. If they are, the page pool code will
1710 * be confused should either of them be flushed. See the special case
1711 * of zero page aliasing mentioned in #3170.
1712 *
1713 */
1714VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1715 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1716{
1717#ifdef VBOX_WITH_PGM_NEM_MODE
1718 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1719#endif
1720 int rc = PGM_LOCK(pVM);
1721 AssertRCReturn(rc, rc);
1722
1723 /*
1724 * Resolve the MMIO2 reference.
1725 */
1726 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1727 if (RT_LIKELY(pPageRemap))
1728 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1729 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1730 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1731 else
1732 {
1733 PGM_UNLOCK(pVM);
1734 return VERR_OUT_OF_RANGE;
1735 }
1736
1737 /*
1738 * Lookup and validate the range.
1739 */
1740 PPGMPHYSHANDLER pCur;
1741 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1742 if (RT_SUCCESS(rc))
1743 {
1744 Assert(pCur->Key == GCPhys);
1745 if (RT_LIKELY( GCPhysPage >= pCur->Key
1746 && GCPhysPage <= pCur->KeyLast))
1747 {
1748 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1749 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1750 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1751 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1752 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1753
1754 /*
1755 * Validate the page.
1756 */
1757 PPGMPAGE pPage;
1758 PPGMRAMRANGE pRam;
1759 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1760 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1761 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1762 {
1763 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1764 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1765 VERR_PGM_PHYS_NOT_MMIO2);
1766 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1767 {
1768 PGM_UNLOCK(pVM);
1769 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1770 }
1771
1772 /*
1773 * The page is already mapped as some other page, reset it
1774 * to an MMIO/ZERO page before doing the new mapping.
1775 */
1776 Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1777 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1778 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
1779 false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
1780 pCur->cAliasedPages--;
1781
1782 /* Since this may be present in the TLB and now be wrong, invalidate
1783 the guest physical address part of the IEM TLBs. Note, we do
1784 this here as we will not do it again after the remapping below. */
1785 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
1786 }
1787 Assert(PGM_PAGE_IS_ZERO(pPage));
1788
1789 /*
1790 * Do the actual remapping here.
1791 * This page now serves as an alias for the backing memory specified.
1792 */
1793 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
1794 GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
1795 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1796 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1797 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1798 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1799 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1800 pCur->cAliasedPages++;
1801 Assert(pCur->cAliasedPages <= pCur->cPages);
1802
1803 /*
1804 * Flush its TLB entry.
1805 *
1806 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here to conserve
1807 * all the other IEM TLB entries. When this one is kicked out and
1808 * reloaded, it will be using the MMIO2 alias, but till then we'll
1809 * continue doing MMIO.
1810 */
1811 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1812 /** @todo Do some performance checks of calling
1813 * IEMTlbInvalidateAllPhysicalAllCpus when in IEM mode, to see if it
1814 * actually makes sense or not. Screen updates are typically massive
1815 * and important when this kind of aliasing is used, so it may pay off... */
1816
1817#ifdef VBOX_WITH_NATIVE_NEM
1818 /* Tell NEM about the backing and protection change. */
1819 if (VM_IS_NEM_ENABLED(pVM))
1820 {
1821 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1822 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1823 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1824 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1825 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1826 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1827 }
1828#endif
1829 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
1830 PGM_UNLOCK(pVM);
1831 return VINF_SUCCESS;
1832 }
1833
1834 PGM_UNLOCK(pVM);
1835 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1836 return VERR_INVALID_PARAMETER;
1837 }
1838
1839 PGM_UNLOCK(pVM);
1840 if (rc == VERR_NOT_FOUND)
1841 {
1842 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1843 return VERR_PGM_HANDLER_NOT_FOUND;
1844 }
1845 return rc;
1846}
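
/*
 * Usage sketch (illustrative only, in the spirit of IOMMMIOMapMMIO2Page):
 * alias the first page of a device's MMIO range with the first page of its
 * MMIO2 region. 'GCPhysMmio', 'pDevIns' and 'hMmio2' are assumed to come
 * from the device's earlier registrations; PGMHandlerPhysicalReset() on
 * GCPhysMmio restores handler-backed MMIO for the whole range afterwards.
 */
#if 0 /* sketch */
static int myDeviceAliasFirstPage(PVMCC pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysMmio, PGMMMIO2HANDLE hMmio2)
{
    int rc = PGMHandlerPhysicalPageAliasMmio2(pVM, GCPhysMmio /* handler start */,
                                              GCPhysMmio /* page to alias */,
                                              pDevIns, hMmio2, 0 /* offMmio2PageRemap */);
    if (rc == VINF_PGM_HANDLER_ALREADY_ALIASED)
        rc = VINF_SUCCESS;  /* the same backing page is already mapped there */
    return rc;
}
#endif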
1847
1848
1849/**
1850 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
1851 *
1852 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
1853 * need to be a known MMIO2 page and that only shadow paging may access the
1854 * page. The latter distinction is important because the only use for this
1855 * feature is for mapping the special APIC access page that VT-x uses to detect
1856 * APIC MMIO operations; the page is shared between all guest CPUs and is
1857 * actually not written to, at least at the moment.
1858 *
1859 * The caller must do the required page table modifications. You can get away
1860 * without making any modifications since it's an MMIO page; the cost is an
1861 * extra \#PF which will then resync the page.
1862 *
1863 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1864 *
1865 *
1866 * @returns VBox status code.
1867 * @param pVM The cross context VM structure.
1868 * @param GCPhys The start address of the access handler. This
1869 * must be a fully page aligned range or we risk
1870 * messing up other handlers installed for the
1871 * start and end pages.
1872 * @param GCPhysPage The physical address of the page to turn off
1873 * access monitoring for.
1874 * @param HCPhysPageRemap The physical address of the HC page that
1875 * serves as backing memory.
1876 *
1877 * @remark May cause a page pool flush if used on a page that is already
1878 * aliased.
1879 */
1880VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1881{
1882/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1883#ifdef VBOX_WITH_PGM_NEM_MODE
1884 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1885#endif
1886 int rc = PGM_LOCK(pVM);
1887 AssertRCReturn(rc, rc);
1888
1889 /*
1890 * Lookup and validate the range.
1891 */
1892 PPGMPHYSHANDLER pCur;
1893 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1894 if (RT_SUCCESS(rc))
1895 {
1896 Assert(pCur->Key == GCPhys);
1897 if (RT_LIKELY( GCPhysPage >= pCur->Key
1898 && GCPhysPage <= pCur->KeyLast))
1899 {
1900 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1901 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1902 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1903 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1904 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1905
1906 /*
1907 * Get and validate the pages.
1908 */
1909 PPGMPAGE pPage;
1910 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1911 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1912 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1913 {
1914 PGM_UNLOCK(pVM);
1915 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
1916 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1917 VERR_PGM_PHYS_NOT_MMIO2);
1918 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1919 }
1920 Assert(PGM_PAGE_IS_ZERO(pPage));
1921
1922 /*
1923 * Do the actual remapping here.
1924 * This page now serves as an alias for the backing memory
1925 * specified as far as shadow paging is concerned.
1926 */
1927 LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
1928 GCPhysPage, pPage, HCPhysPageRemap));
1929 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1930 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
1931 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1932 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1933 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1934 pCur->cAliasedPages++;
1935 Assert(pCur->cAliasedPages <= pCur->cPages);
1936
1937 /*
1938 * Flush its TLB entry.
1939 *
1940 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here as special
1941 * aliased MMIO pages are handled like MMIO by the IEM TLB.
1942 */
1943 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1944
1945#ifdef VBOX_WITH_NATIVE_NEM
1946 /* Tell NEM about the backing and protection change. */
1947 if (VM_IS_NEM_ENABLED(pVM))
1948 {
1949 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
1950 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1951 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1952 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1953 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
1954 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
1955 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1956 }
1957#endif
1958 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1959 PGM_UNLOCK(pVM);
1960 return VINF_SUCCESS;
1961 }
1962 PGM_UNLOCK(pVM);
1963 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1964 return VERR_INVALID_PARAMETER;
1965 }
1966 PGM_UNLOCK(pVM);
1967
1968 if (rc == VERR_NOT_FOUND)
1969 {
1970 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1971 return VERR_PGM_HANDLER_NOT_FOUND;
1972 }
1973 return rc;
1974}
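
/*
 * Usage sketch (illustrative only): per the comments above, the intended
 * use is mapping the VT-x APIC-access page over the guest APIC base.
 * 'GCPhysApicBase' and 'HCPhysApicAccess' are assumed to be known to the
 * caller from its own MMIO handler registration and page allocation.
 */
#if 0 /* sketch */
static int myMapApicAccessPage(PVMCC pVM, RTGCPHYS GCPhysApicBase, RTHCPHYS HCPhysApicAccess)
{
    return PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicBase /* handler start */,
                                         GCPhysApicBase /* page */,
                                         HCPhysApicAccess);
}
#endif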
1975
1976
1977/**
1978 * Checks if a physical range is handled.
1979 *
1980 * @returns boolean
1981 * @param pVM The cross context VM structure.
1982 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1983 * @remarks Caller must take the PGM lock...
1984 * @thread EMT.
1985 */
1986VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
1987{
1988 /*
1989 * Find the handler.
1990 */
1991 PGM_LOCK_VOID(pVM);
1992 PPGMPHYSHANDLER pCur;
1993 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1994 if (RT_SUCCESS(rc))
1995 {
1996#ifdef VBOX_STRICT
1997 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
1998 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1999 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
2000 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
2001 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
2002#endif
2003 PGM_UNLOCK(pVM);
2004 return true;
2005 }
2006 PGM_UNLOCK(pVM);
2007 return false;
2008}
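
/*
 * Usage sketch (illustrative only): since the answer is not stable with
 * respect to deregistration unless the caller holds the PGM lock, the
 * typical pattern is a debug-build sanity check:
 */
#if 0 /* sketch */
static void myAssertHandlerPresent(PVMCC pVM, RTGCPHYS GCPhys)
{
    AssertMsg(PGMHandlerPhysicalIsRegistered(pVM, GCPhys),
              ("No physical access handler registered at %RGp\n", GCPhys));
}
#endif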
2009
2010
2011/**
2012 * Checks if it's a disabled all access handler or write access handler at the
2013 * given address.
2014 *
2015 * @returns true if it's an all access handler, false if it's a write access
2016 * handler.
2017 * @param pVM The cross context VM structure.
2018 * @param GCPhys The address of the page with a disabled handler.
2019 *
2020 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
2021 */
2022bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
2023{
2024 PGM_LOCK_VOID(pVM);
2025 PPGMPHYSHANDLER pCur;
2026 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2027 AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), true);
2028
2029 /* Only whole pages can be disabled. */
2030 Assert( pCur->Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
2031 && pCur->KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));
2032
2033 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2034 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
2035 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
2036 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
2037 bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
2038 PGM_UNLOCK(pVM);
2039 return fRet;
2040}
2041
2042#ifdef VBOX_STRICT
2043
2044/**
2045 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
2046 * and its AVL enumerators.
2047 */
2048typedef struct PGMAHAFIS
2049{
2050 /** The current physical address. */
2051 RTGCPHYS GCPhys;
2052 /** Number of errors. */
2053 unsigned cErrors;
2054 /** Pointer to the VM. */
2055 PVM pVM;
2056} PGMAHAFIS, *PPGMAHAFIS;
2057
2058
2059/**
2060 * Asserts that the handlers+guest-page-tables == ramrange-flags and
2061 * that the physical addresses associated with virtual handlers are correct.
2062 *
2063 * @returns Number of mismatches.
2064 * @param pVM The cross context VM structure.
2065 */
2066VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
2067{
2068 PPGM pPGM = &pVM->pgm.s;
2069 PGMAHAFIS State;
2070 State.GCPhys = 0;
2071 State.cErrors = 0;
2072 State.pVM = pVM;
2073
2074 PGM_LOCK_ASSERT_OWNER(pVM);
2075
2076 /*
2077 * Check the RAM flags against the handlers.
2078 */
2079 PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
2080 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
2081 {
2082 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
2083 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2084 {
2085 PGMPAGE const *pPage = &pRam->aPages[iPage];
2086 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
2087 {
2088 State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);
2089
2090 /*
2091 * Physical first - calculate the state based on the handlers
2092 * active on the page, then compare.
2093 */
2094 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
2095 {
2096 /* the first */
2097 PPGMPHYSHANDLER pPhys;
2098 int rc = pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, State.GCPhys, &pPhys);
2099 if (rc == VERR_NOT_FOUND)
2100 {
2101 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2102 State.GCPhys, &pPhys);
2103 if (RT_SUCCESS(rc))
2104 {
2105 Assert(pPhys->Key >= State.GCPhys);
2106 if (pPhys->Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
2107 pPhys = NULL;
2108 }
2109 else
2110 AssertLogRelMsgReturn(rc == VERR_NOT_FOUND, ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
2111 }
2112 else
2113 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
2114
2115 if (pPhys)
2116 {
2117 PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
2118 unsigned uState = pPhysType->uState;
2119 bool const fNotInHm = pPhysType->fNotInHm; /* whole pages, so no need to accumulate sub-page configs. */
2120
2121 /* more? */
2122 while (pPhys->KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
2123 {
2124 PPGMPHYSHANDLER pPhys2;
2125 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2126 pPhys->KeyLast + 1, &pPhys2);
2127 if (rc == VERR_NOT_FOUND)
2128 break;
2129 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc KeyLast+1=%RGp\n", rc, pPhys->KeyLast + 1), 999);
2130 if (pPhys2->Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
2131 break;
2132 PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
2133 uState = RT_MAX(uState, pPhysType2->uState);
2134 pPhys = pPhys2;
2135 }
2136
2137 /* Compare. */
2138 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
2139 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
2140 {
2141 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
2142 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
2143 State.cErrors++;
2144 }
2145 AssertMsgStmt(PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) == fNotInHm,
2146 ("ram range vs phys handler flags mismatch. GCPhys=%RGp fNotInHm=%d, %d %s\n",
2147 State.GCPhys, PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage), fNotInHm, pPhysType->pszDesc),
2148 State.cErrors++);
2149 }
2150 else
2151 {
2152 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
2153 State.cErrors++;
2154 }
2155 }
2156 }
2157 } /* foreach page in ram range. */
2158 } /* foreach ram range. */
2159
2160 /*
2161 * Do the reverse check for physical handlers.
2162 */
2163 /** @todo */
2164
2165 return State.cErrors;
2166}
2167
2168#endif /* VBOX_STRICT */
2169