VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@108132

Last change on this file since 108132 was 108132, checked in by vboxsync, 6 days ago

VMM/PGM: Merge and deduplicate code targeting x86 & amd64 in PGM.cpp. Don't bother compiling pool stuff on arm and darwin.amd64. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 87.0 KB
/* $Id: PGMAllHandler.cpp 108132 2025-02-10 11:05:23Z vboxsync $ */
/** @file
 * PGM - Page Manager / Monitor, Access Handlers.
 */

/*
 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/dbgf.h>
#ifdef IN_RING0
# include <VBox/vmm/pdmdev.h>
#endif
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"

#include <VBox/log.h>
#include <iprt/assert.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/string.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/vmm/selm.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Dummy physical access handler type record. */
CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
{
    /* .hType = */              UINT64_C(0x93b7557e1937aaff),
    /* .enmKind = */            PGMPHYSHANDLERKIND_INVALID,
    /* .uState = */             PGM_PAGE_HNDL_PHYS_STATE_ALL,
    /* .fKeepPgmLock = */       true,
    /* .fRing0DevInsIdx = */    false,
#ifdef IN_RING0
    /* .fNotInHm = */           false,
    /* .pfnHandler = */         pgmR0HandlerPhysicalHandlerToRing3,
    /* .pfnPfHandler = */       pgmR0HandlerPhysicalPfHandlerToRing3,
#elif defined(IN_RING3)
    /* .fRing0Enabled = */      false,
    /* .fNotInHm = */           false,
    /* .pfnHandler = */         pgmR3HandlerPhysicalHandlerInvalid,
#else
# error "unsupported context"
#endif
    /* .pszDesc = */            "dummy"
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                           void *pvBitmap, uint32_t offBitmap);
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);


#ifndef IN_RING3

/**
 * @callback_method_impl{FNPGMPHYSHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                                   PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}


/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
                                     RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, uErrorCode, pCtx, pvFault, GCPhysFault, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}

#endif /* !IN_RING3 */


/**
 * Worker for pgmHandlerPhysicalExCreate.
 *
 * @returns A new physical handler on success or NULL on failure.
 * @param   pVM         The cross context VM structure.
 * @param   pType       The physical handler type.
 * @param   hType       The physical handler type registration handle.
 * @param   uUser       User argument to the handlers (not a pointer).
 * @param   pszDesc     Description of this handler.  If NULL, the type
 *                      description will be used instead.
 */
DECL_FORCE_INLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalExCreateWorker(PVMCC pVM, PCPGMPHYSHANDLERTYPEINT pType,
                                                                    PGMPHYSHANDLERTYPE hType, uint64_t uUser,
                                                                    R3PTRTYPE(const char *) pszDesc)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    PPGMPHYSHANDLER pNew = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.allocateNode();
    if (pNew)
    {
        pNew->Key           = NIL_RTGCPHYS;
        pNew->KeyLast       = NIL_RTGCPHYS;
        pNew->cPages        = 0;
        pNew->cAliasedPages = 0;
        pNew->cTmpOffPages  = 0;
        pNew->uUser         = uUser;
        pNew->hType         = hType;
        pNew->pszDesc       = pszDesc != NIL_RTR3PTR ? pszDesc
#ifdef IN_RING3
                            : pType->pszDesc;
#else
                            : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
        NOREF(pType);
#endif
    }
    return pNew;
}


/**
 * Creates a physical access handler, allocation part.
 *
 * @returns VBox status code.
 * @retval  VERR_OUT_OF_RESOURCES if no more handlers are available.
 *
 * @param   pVM             The cross context VM structure.
 * @param   hType           The handler type registration handle.
 * @param   uUser           User argument to the handlers (not a pointer).
 * @param   pszDesc         Description of this handler.  If NULL, the type
 *                          description will be used instead.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
                               R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
{
    /*
     * Validate input.
     */
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
    AssertPtr(ppPhysHandler);

    Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));

    /*
     * Allocate and initialize the new entry.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);
    *ppPhysHandler = pgmHandlerPhysicalExCreateWorker(pVM, pType, hType, uUser, pszDesc);
    PGM_UNLOCK(pVM);
    if (*ppPhysHandler)
        return VINF_SUCCESS;
    return VERR_OUT_OF_RESOURCES;
}

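/*
 * Usage sketch (illustrative only; hType, uUser, GCPhys and GCPhysLast are
 * placeholders): the Ex API splits allocation from tree insertion, mirroring
 * what PGMHandlerPhysicalRegister further down does internally.
 *
 *     PPGMPHYSHANDLER pNew;
 *     int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, "my device", &pNew);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
 *         if (RT_FAILURE(rc))
 *             pgmHandlerPhysicalExDestroy(pVM, pNew);
 *     }
 */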

/**
 * Duplicates a physical access handler.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandlerSrc The source handler to duplicate.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
{
    return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
                                      pPhysHandlerSrc->pszDesc, ppPhysHandler);
}


/**
 * Registers an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED may also be returned.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The physical handler.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address (inclusive).
 */
int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Validate input.
     */
    AssertReturn(pPhysHandler, VERR_INVALID_POINTER);
    PGMPHYSHANDLERTYPE const      hType = pPhysHandler->hType;
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);

    AssertPtr(pPhysHandler);

    Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
         hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
    AssertReturn(pPhysHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);

    AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
    Assert(GCPhysLast - GCPhys < _4G); /* ASSUMPTION in PGMAllPhys.cpp */

    switch (pType->enmKind)
    {
        case PGMPHYSHANDLERKIND_WRITE:
            if (!pType->fNotInHm)
                break;
            RT_FALL_THRU(); /* Simplification: fNotInHm can only be used with full pages */
        case PGMPHYSHANDLERKIND_MMIO:
        case PGMPHYSHANDLERKIND_ALL:
            /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
            AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
            AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
            break;
        default:
            AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
    if (   !pRam
        || GCPhysLast > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
#endif
        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    }
    Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
    Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);

    /*
     * Try insert into list.
     */
    pPhysHandler->Key     = GCPhys;
    pPhysHandler->KeyLast = GCPhysLast;
    pPhysHandler->cPages  = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

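    /* Worked example (illustrative, assuming 4 KiB guest pages): for a
       page-aligned GCPhys=0x10000 and GCPhysLast=0x12fff the line above
       computes (0x12fff - 0x10000 + 0x1000) >> 12 = 3 pages. */
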
    int rc = PGM_LOCK(pVM);
    if (RT_SUCCESS(rc))
    {
        rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
        if (RT_SUCCESS(rc))
        {
            rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
            if (rc == VINF_PGM_SYNC_CR3)
                rc = VINF_PGM_GCPHYS_ALIASED;

#if defined(IN_RING3) || defined(IN_RING0)
            NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
#endif
            PGM_UNLOCK(pVM);

            if (rc != VINF_SUCCESS)
                Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
            return rc;
        }
        PGM_UNLOCK(pVM);
    }

    pPhysHandler->Key     = NIL_RTGCPHYS;
    pPhysHandler->KeyLast = NIL_RTGCPHYS;

    AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);

#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
                     GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}


/**
 * Worker for pgmHandlerPhysicalRegisterVmxApicAccessPage.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED may also be returned.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The physical handler.
 * @param   GCPhys          The address of the virtual VMX APIC-access page.
 *                          Must be page aligned.
 */
static int pgmHandlerPhysicalRegisterVmxApicAccessPage(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    LogFunc(("GCPhys=%RGp\n", GCPhys));

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE   pRam       = pgmPhysGetRange(pVM, GCPhys);
    RTGCPHYS const GCPhysLast = GCPhys | X86_PAGE_4K_OFFSET_MASK;
    if (   !pRam
        || GCPhysLast > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
#endif
        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    }
    Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
    Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);

    /*
     * Try insert into list.
     */
    pPhysHandler->Key     = GCPhys;
    pPhysHandler->KeyLast = GCPhysLast;
    pPhysHandler->cPages  = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

    int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
    if (RT_SUCCESS(rc))
    {
        rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
        if (rc == VINF_PGM_SYNC_CR3)
            rc = VINF_PGM_GCPHYS_ALIASED;

#if defined(IN_RING3) || defined(IN_RING0)
        NEMHCNotifyHandlerPhysicalRegister(pVM, PGMPHYSHANDLERKIND_ALL, GCPhys, GCPhysLast - GCPhys + 1);
#endif
        return rc;
    }

    pPhysHandler->Key     = NIL_RTGCPHYS;
    pPhysHandler->KeyLast = NIL_RTGCPHYS;

    AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);
#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}


/**
 * Registers an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated
 *          because the guest page is aliased and/or mapped by multiple PTs.  A
 *          CR3 sync has been flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an
 *          existing one.  A debug assertion is raised.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address (inclusive).
 * @param   hType           The handler type registration handle.
 * @param   uUser           User argument to the handler.
 * @param   pszDesc         Description of this handler.  If NULL, the type
 *                          description will be used instead.
 */
VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
                                        uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
{
#ifdef LOG_ENABLED
    PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
#endif

    PPGMPHYSHANDLER pNew;
    int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
    if (RT_SUCCESS(rc))
    {
        rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
        if (RT_SUCCESS(rc))
            return rc;
        pgmHandlerPhysicalExDestroy(pVM, pNew);
    }
    return rc;
}

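/*
 * Illustrative call (hType and uUser are placeholders): the MMIO and ALL
 * kinds, and WRITE with fNotInHm, must cover whole guest pages, so a minimal
 * registration spans exactly one page:
 *
 *     rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhys + GUEST_PAGE_SIZE - 1,
 *                                     hType, uUser, "example device");
 */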

/**
 * Registers an access handler for a virtual VMX APIC-access page.
 *
 * This holds the PGM lock across the whole operation to resolve races between
 * VCPUs registering the same page simultaneously.  It's also a slightly slimmer
 * version of the regular registration function as it's specific to the VMX
 * APIC-access page.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated
 *          because the guest page is aliased and/or mapped by multiple PTs.  A
 *          CR3 sync has been flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an
 *          existing one.  A debug assertion is raised.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The address of the VMX virtual-APIC access page.  Must
 *                      be page aligned.
 * @param   hType       The handler type registration handle.
 */
VMMDECL(int) PGMHandlerPhysicalRegisterVmxApicAccessPage(PVMCC pVM, RTGCPHYS GCPhys, PGMPHYSHANDLERTYPE hType)
{
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertMsg(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));

    /*
     * Find out if the VMX APIC access page has already been registered at this address.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pHandler;
    rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pHandler);
    if (RT_SUCCESS(rc))
    {
        PCPGMPHYSHANDLERTYPEINT const pHandlerType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pHandler);
        Assert(GCPhys >= pHandler->Key && GCPhys <= pHandler->KeyLast);
        Assert(   pHandlerType->enmKind == PGMPHYSHANDLERKIND_WRITE
               || pHandlerType->enmKind == PGMPHYSHANDLERKIND_ALL
               || pHandlerType->enmKind == PGMPHYSHANDLERKIND_MMIO);

        /* Check that it's the virtual VMX APIC-access page. */
        if (pHandlerType->fNotInHm)
        {
            Assert(pHandlerType->enmKind == PGMPHYSHANDLERKIND_ALL);
            rc = VINF_SUCCESS;
        }
        else
        {
            rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
            AssertMsgFailed(("Conflict! GCPhys=%RGp enmKind=%#x fNotInHm=%RTbool\n", GCPhys, pHandlerType->enmKind,
                             pHandlerType->fNotInHm));
        }

        PGM_UNLOCK(pVM);
        return rc;
    }

    /*
     * Validate the page handler parameters before registering the virtual VMX APIC-access page.
     */
    AssertReturn(pType->enmKind == PGMPHYSHANDLERKIND_ALL, VERR_INVALID_HANDLE);
    AssertReturn(pType->fNotInHm, VERR_PGM_HANDLER_IPE_1);

    /*
     * Create and register a physical handler for the virtual VMX APIC-access page.
     */
    pHandler = pgmHandlerPhysicalExCreateWorker(pVM, pType, hType, 0 /*uUser*/, NULL /*pszDesc*/);
    if (pHandler)
    {
        rc = pgmHandlerPhysicalRegisterVmxApicAccessPage(pVM, pHandler, GCPhys);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
            pgmHandlerPhysicalExDestroy(pVM, pHandler);
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @retval  VINF_PGM_SYNC_CR3 when the shadow PTs could not be updated because
 *          the guest page is aliased and/or mapped by multiple PTs.  FFs set.
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The physical handler.
 * @param   pRam        The RAM range.
 * @param   pvBitmap    Dirty bitmap.  Optional.
 * @param   offBitmap   Dirty bitmap offset.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                          void *pvBitmap, uint32_t offBitmap)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool                    fFlushTLBs = false;
    int                     rc         = VINF_SUCCESS;
    PCPGMPHYSHANDLERTYPEINT pCurType   = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    const unsigned          uState     = pCurType->uState;
    uint32_t                cPages     = pCur->cPages;
    uint32_t                i          = (pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
    for (;;)
    {
        PPGMPAGE pPage = &pRam->aPages[i];
        AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
                  ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));

        /* Only do upgrades. */
        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState, pCurType->fNotInHm);

#if defined(VBOX_WITH_NATIVE_NEM) || !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
            const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
#endif
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
            int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
                                               false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
            if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
                rc = rc2;
#endif

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            if (pvBitmap)
                ASMBitSet(pvBitmap, offBitmap);
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
        offBitmap++;
    }

    if (fFlushTLBs)
    {
        PGM_INVL_ALL_VCPU_TLBS(pVM);
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n",
             rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));

    return rc;
}

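/*
 * Note on the dirty bitmap (illustrative summary of the function above): when
 * pvBitmap is given, bit offBitmap+n corresponds to the n'th page of the
 * handler range, and a bit is only set for pages whose handler state was
 * actually upgraded.
 */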

/**
 * Deregisters a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The handler to deregister (but not free).
 */
int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
{
    LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
             pPhysHandler->Key, pPhysHandler->KeyLast, R3STRING(pPhysHandler->pszDesc)));

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    RTGCPHYS const GCPhys = pPhysHandler->Key;
    AssertReturnStmt(GCPhys != NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_PGM_HANDLER_NOT_FOUND);

    /*
     * Remove the handler from the tree.
     */

    PPGMPHYSHANDLER pRemoved;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
    if (RT_SUCCESS(rc))
    {
        if (pRemoved == pPhysHandler)
        {
            /*
             * Clear the page bits, notify the REM about this change and clear
             * the cache.
             */
            pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
            if (VM_IS_NEM_ENABLED(pVM))
                pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
            pVM->pgm.s.idxLastPhysHandler = 0;

            pPhysHandler->Key     = NIL_RTGCPHYS;
            pPhysHandler->KeyLast = NIL_RTGCPHYS;

            PGM_UNLOCK(pVM);

            return VINF_SUCCESS;
        }

        /*
         * Both of the failure conditions here are considered internal processing
         * errors because they can only be caused by race conditions or corruption.
         * If we ever need to handle concurrent deregistration, we have to move
         * the NIL_RTGCPHYS check inside the PGM lock.
         */
        pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pRemoved);
    }

    PGM_UNLOCK(pVM);

    if (RT_FAILURE(rc))
        AssertMsgFailed(("Didn't find range starting at %RGp in the tree! rc=%Rrc\n", GCPhys, rc));
    else
        AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
                         GCPhys, pRemoved, pPhysHandler));
    return VERR_PGM_HANDLER_IPE_1;
}


/**
 * Destroys (frees) a physical handler.
 *
 * The caller must deregister it before destroying it!
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pHandler    The handler to free.  NULL if ignored.
 */
int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
{
    if (pHandler)
    {
        AssertPtr(pHandler);
        AssertReturn(pHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);

        int rc = PGM_LOCK(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pHandler);
            PGM_UNLOCK(pVM);
        }
        return rc;
    }
    return VINF_SUCCESS;
}


/**
 * Deregisters a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address.
 */
VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
{
    AssertReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);

    /*
     * Find the handler.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pRemoved;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
    if (RT_SUCCESS(rc))
    {
        Assert(pRemoved->Key == GCPhys);
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
                 pRemoved->Key, pRemoved->KeyLast, R3STRING(pRemoved->pszDesc)));

        /*
         * Clear the page bits, notify the REM about this change and clear
         * the cache.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
        pVM->pgm.s.idxLastPhysHandler = 0;

        pRemoved->Key = NIL_RTGCPHYS;
        rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pRemoved);

        PGM_UNLOCK(pVM);
        return rc;
    }

    PGM_UNLOCK(pVM);

    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}

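/*
 * Note: unlike pgmHandlerPhysicalExDeregister above, which leaves the node
 * alive for the caller to dispose of via pgmHandlerPhysicalExDestroy, this
 * variant also frees the handler node after removing it from the tree.
 */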

/**
 * Shared code with modify.
 */
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
#ifdef VBOX_WITH_NATIVE_NEM
    PCPGMPHYSHANDLERTYPEINT pCurType    = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    RTGCPHYS                GCPhysStart = pCur->Key;
    RTGCPHYS                GCPhysLast  = pCur->KeyLast;

    /*
     * Page align the range.
     *
     * Since we've reset (recalculated) the physical handler state of all pages
     * we can make use of the page states to figure out whether a page should be
     * included in the REM notification or not.
     */
    if (   (pCur->Key & GUEST_PAGE_OFFSET_MASK)
        || ((pCur->KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
    {
        Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);

        if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
                if (   GCPhys > GCPhysLast
                    || GCPhys < GCPhysStart)
                    return;
                GCPhysStart = GCPhys;
            }
            else
                GCPhysStart &= X86_PTE_PAE_PG_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }

        if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
                if (   GCPhys < GCPhysStart
                    || GCPhys > GCPhysLast)
                    return;
                GCPhysLast = GCPhys;
            }
            else
                GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }
    }

    /*
     * Tell NEM.
     */
    PPGMRAMRANGE const pRam    = pgmPhysGetRange(pVM, GCPhysStart);
    RTGCPHYS const     cb      = GCPhysLast - GCPhysStart + 1;
    uint8_t            u2State = UINT8_MAX;
    NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
                                         pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
    if (u2State != UINT8_MAX && pRam)
        pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
                                   cb >> GUEST_PAGE_SHIFT, u2State);
#else
    RT_NOREF(pVM, pCur);
#endif
}


/**
 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
 * edge pages.
 */
DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Look for other handlers.
     */
    unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
    for (;;)
    {
        PPGMPHYSHANDLER pCur;
        int rc;
        if (fAbove)
            rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                              GCPhys, &pCur);
        else
            rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrBelow(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                              GCPhys, &pCur);
        if (rc == VERR_NOT_FOUND)
            break;
        AssertRCBreak(rc);
        if (((fAbove ? pCur->Key : pCur->KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        uState = RT_MAX(uState, pCurType->uState);

        /* next? */
        RTGCPHYS GCPhysNext = fAbove
                            ? pCur->KeyLast + 1
                            : pCur->Key - 1;
        if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        GCPhys = GCPhysNext;
    }

    /*
     * Update if we found something that is a higher priority state than the current.
     * Note! The PGMPHYSHANDLER_F_NOT_IN_HM can be ignored here as it requires whole pages.
     */
    if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
        if (   RT_SUCCESS(rc)
            && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            /* This should normally not be necessary. */
            PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, uState);
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
            bool fFlushTLBs;
            rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
            if (RT_SUCCESS(rc) && fFlushTLBs)
                PGM_INVL_ALL_VCPU_TLBS(pVM);
            else
                AssertRC(rc);
#endif

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
        }
        else
            AssertRC(rc);
    }
}


/**
 * Resets an aliased page.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPage           The page.
 * @param   GCPhysPage      The page address in case it comes in handy.
 * @param   pRam            The RAM range the page is associated with (for NEM
 *                          notifications).
 * @param   fDoAccounting   Whether to perform accounting.  (Only set during
 *                          reset where pgmR3PhysRamReset doesn't have the
 *                          handler structure handy.)
 * @param   fFlushIemTlbs   Whether to perform IEM TLB flushing or not.  This
 *                          can be cleared only if the caller does the flushing
 *                          after calling this function.
 */
void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam,
                                        bool fDoAccounting, bool fFlushIemTlbs)
{
    Assert(   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
           || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
    Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
#ifdef VBOX_WITH_NATIVE_NEM
    RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
#endif

    /*
     * Flush any shadow page table references *first*.
     */
#if defined(VBOX_VMM_TARGET_ARMV8)
    AssertReleaseFailed();
#endif
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    bool fFlushTLBs = false;
    int  rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
    AssertLogRelRCReturnVoid(rc);
    HMFlushTlbOnAllVCpus(pVM);
#endif

    /*
     * Make it an MMIO/Zero page.
     */
    PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
    PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
    PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);

    /*
     * Flush its TLB entry.
     */
    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
    if (fFlushIemTlbs)
        IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_RESET_ALIAS);

    /*
     * Do accounting for pgmR3PhysRamReset.
     */
    if (fDoAccounting)
    {
        PPGMPHYSHANDLER pHandler;
        int rc2 = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pHandler);
        if (RT_SUCCESS(rc2))
        {
            Assert(pHandler->cAliasedPages > 0);
            pHandler->cAliasedPages--;
        }
        else
            AssertMsgFailed(("rc2=%Rrc GCPhysPage=%RGp\n", rc2, GCPhysPage));
    }

#ifdef VBOX_WITH_NATIVE_NEM
    /*
     * Tell NEM about the protection change.
     */
    if (VM_IS_NEM_ENABLED(pVM))
    {
        uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
        NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev,
# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
                                   pVM->pgm.s.HCPhysZeroPg,
# else
                                   0,
# endif
                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                   NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    }
#else
    RT_NOREF(pRam);
#endif
}


/**
 * Resets ram range flags.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pCur    The physical handler.
 *
 * @remark  We don't start messing with the shadow page tables, as we've
 *          already got code in Trap0e which deals with out of sync handler
 *          flags (originally conceived for global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the state.
     */
    RTUINT       cPages   = pCur->cPages;
    RTGCPHYS     GCPhys   = pCur->Key;
    PPGMRAMRANGE pRamHint = NULL;
    for (;;)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
        if (RT_SUCCESS(rc))
        {
            /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
               (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.)  */
            bool fNemNotifiedAlready = false;
            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
            {
                Assert(pCur->cAliasedPages > 0);
                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/, true /*fFlushIemTlbs*/);
                pCur->cAliasedPages--;
                fNemNotifiedAlready = true;
            }
#ifdef VBOX_STRICT
            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
                      ("%RGp %R[pgmpage]\n", GCPhys, pPage));
#endif
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE, false);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection change. */
            if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            RT_NOREF(fNemNotifiedAlready);
        }
        else
            AssertRC(rc);

        /* next */
        if (--cPages == 0)
            break;
        GCPhys += GUEST_PAGE_SIZE;
    }

    pCur->cAliasedPages = 0;
    pCur->cTmpOffPages  = 0;

    /*
     * Check for partial start and end pages.
     */
    if (pCur->Key & GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->Key - 1, false /* fAbove */, &pRamHint);
    if ((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->KeyLast + 1, true /* fAbove */, &pRamHint);
}


#if 0 /* unused */
/**
 * Modifies a physical page access handler.
 *
 * Modification can only be done to the range itself, not the type or anything else.
 *
 * @returns VBox status code.
 *          For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
 *          and a new registration must be performed!
 * @param   pVM             The cross context VM structure.
 * @param   GCPhysCurrent   Current location.
 * @param   GCPhys          New location.
 * @param   GCPhysLast      New last location.
 */
VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Remove it.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
    if (pCur)
    {
        /*
         * Clear the ram flags. (We're gonna move or free it!)
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        /** @todo pCurType validation. */
        bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
                                && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;

        /*
         * Validate the new range, modify and reinsert.
         */
        if (GCPhysLast >= GCPhys)
        {
            /*
             * We require the range to be within registered ram.
             * There is no apparent need to support ranges which cover more than one ram range.
             */
            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
            if (   pRam
                && GCPhys <= pRam->GCPhysLast
                && GCPhysLast >= pRam->GCPhys)
            {
                pCur->Core.Key     = GCPhys;
                pCur->Core.KeyLast = GCPhysLast;
                pCur->cPages       = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;

                if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
                {
                    RTGCPHYS const           cb      = GCPhysLast - GCPhys + 1;
                    PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;

                    /*
                     * Set ram flags, flush shadow PT entries and finally tell REM about this.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);

                    /** @todo NEM: not sure we need this notification... */
                    NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);

                    PGM_UNLOCK(pVM);

                    PGM_INVL_ALL_VCPU_TLBS(pVM);
                    Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
                         GCPhysCurrent, GCPhys, GCPhysLast));
                    return VINF_SUCCESS;
                }

                AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
            }
            else
            {
                AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
            }
        }
        else
        {
            AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
            rc = VERR_INVALID_PARAMETER;
        }

        /*
         * Invalid new location, flush the cache and free it.
         * We've only gotta notify REM and free the memory.
         */
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
        pVM->pgm.s.pLastPhysHandlerR0 = 0;
        pVM->pgm.s.pLastPhysHandlerR3 = 0;
        PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
        MMHyperFree(pVM, pCur);
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}
#endif /* unused */


/**
 * Changes the user callback arguments associated with a physical access handler.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address of the handler.
 * @param   uUser       User argument to the handlers.
 */
VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
{
    /*
     * Find the handler and make the change.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);
        pCur->uUser = uUser;
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}

#if 0 /* unused */

/**
 * Splits a physical access handler in two.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start physical address of the handler.
 * @param   GCPhysSplit     The split address.
 */
VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
        {
            /*
             * Create new handler node for the 2nd half.
             */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages   = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages       = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                PGM_UNLOCK(pVM);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("whu?\n"));
            rc = VERR_PGM_PHYS_HANDLER_IPE;
        }
        else
        {
            AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    MMHyperFree(pVM, pNew);
    return rc;
}


/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys1     Start physical address of the first handler.
 * @param   GCPhys2     Start physical address of the second handler.
 */
VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
    if (RT_LIKELY(pCur1))
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
        if (RT_LIKELY(pCur2))
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
            {
                if (RT_LIKELY(pCur1->hType == pCur2->hType))
                {
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (RT_LIKELY(pCur3 == pCur2))
                    {
                        pCur1->Core.KeyLast = pCur2->Core.KeyLast;
                        pCur1->cPages       = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pVM->pgm.s.pLastPhysHandlerR0 = 0;
                        pVM->pgm.s.pLastPhysHandlerR3 = 0;
                        PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
                        MMHyperFree(pVM, pCur2);
                        PGM_UNLOCK(pVM);
                        return VINF_SUCCESS;
                    }

                    Assert(pCur3 == pCur2);
                    rc = VERR_PGM_PHYS_HANDLER_IPE;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    return rc;
}

#endif /* unused */

/**
 * Resets any modifications to individual pages in a physical page access
 * handler region.
 *
 * This is used in pair with PGMHandlerPhysicalPageTempOff(),
 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start address of the handler region, i.e. what you
 *                      passed to PGMR3HandlerPhysicalRegister(),
 *                      PGMHandlerPhysicalRegisterEx() or
 *                      PGMHandlerPhysicalModify().
 */
VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
{
    LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);

        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        switch (pCurType->enmKind)
        {
            case PGMPHYSHANDLERKIND_WRITE:
            case PGMPHYSHANDLERKIND_ALL:
            case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
            {
                STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
                PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
                Assert(pRam);
                Assert(pRam->GCPhys     <= pCur->Key);
                Assert(pRam->GCPhysLast >= pCur->KeyLast);

                if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
                {
                    /*
                     * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
                     * This could probably be optimized a bit wrt to flushing, but I'm too lazy
                     * to do that now...
                     */
                    if (pCur->cAliasedPages)
                    {
                        PPGMPAGE pPage        = &pRam->aPages[(pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
                        RTGCPHYS GCPhysPage   = pCur->Key;
                        uint32_t cLeft        = pCur->cPages;
                        bool     fFlushIemTlb = false;
                        while (cLeft-- > 0)
                        {
                            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
                            {
                                fFlushIemTlb |= PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO;
                                Assert(pCur->cAliasedPages > 0);
                                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
                                                                   false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
                                --pCur->cAliasedPages;
#ifndef VBOX_STRICT
                                if (pCur->cAliasedPages == 0)
                                    break;
#endif
                            }
                            Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
                            GCPhysPage += GUEST_PAGE_SIZE;
                            pPage++;
                        }
                        Assert(pCur->cAliasedPages == 0);

                        /*
                         * Flush IEM TLBs in case they contain any references to aliased pages.
                         * This is only necessary for MMIO2 aliases.
                         */
                        if (fFlushIemTlb)
                            IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_HANDLER_RESET);
                    }
                }
                else if (pCur->cTmpOffPages > 0)
                {
                    /*
                     * Set the flags and flush shadow PT entries.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
                }

                pCur->cAliasedPages = 0;
                pCur->cTmpOffPages  = 0;

                rc = VINF_SUCCESS;
                break;
            }

            /*
             * Invalid.
             */
            default:
                AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
                rc = VERR_PGM_PHYS_HANDLER_IPE;
                break;
        }
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
 * tracking.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start address of the handler region.
 * @param   pvBitmap    Dirty bitmap.  Caller has cleared this already, only
 *                      dirty bits will be set.  Caller also made sure it's big
 *                      enough.
 * @param   offBitmap   Dirty bitmap offset.
 * @remarks Caller must own the PGM critical section.
 */
DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
{
    LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur;
    int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);

        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
        if (   pCurType
            && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
        {
            STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));

#ifdef VBOX_STRICT
            PPGMRAMRANGE const pRamStrict = pgmPhysGetRange(pVM, GCPhys);
            Assert(pRamStrict && pRamStrict->GCPhys     <= pCur->Key);
            Assert(pRamStrict && pRamStrict->GCPhysLast >= pCur->KeyLast);
#endif

            /*
             * Set the flags and flush shadow PT entries.
             */
            if (pCur->cTmpOffPages > 0)
            {
                PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
                if (pRam) /* paranoia */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
                else
                    AssertFailed();
                pCur->cTmpOffPages = 0;
            }
            else
                rc = VINF_SUCCESS;
        }
        else
        {
            AssertFailed();
            rc = VERR_WRONG_TYPE;
        }
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    return rc;
}


/**
 * Temporarily turns off the access monitoring of a page within a monitored
 * physical write/all page access handler region.
 *
 * Use this when no further \#PFs are required for that page.  Be aware that
 * a page directory sync might reset the flags, and turn on access monitoring
 * for the page.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          The start address of the access handler.  This
 *                          must be a fully page aligned range or we risk
 *                          messing up other handlers installed for the
 *                          start and end pages.
 * @param   GCPhysPage      The physical address of the page to turn off
 *                          access monitoring for.
 */
VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Validate the range.
     */
    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);
        if (RT_LIKELY(   GCPhysPage >= pCur->Key
                      && GCPhysPage <= pCur->KeyLast))
        {
            Assert(!(pCur->Key & GUEST_PAGE_OFFSET_MASK));
            Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);

            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertReturnStmt(   pCurType
                             && (   pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
                                 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
                             PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);

            /*
             * Change the page status.
             */
            PPGMPAGE     pPage;
            PPGMRAMRANGE pRam;
            rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
            AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
            if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
            {
                PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
                pCur->cTmpOffPages++;

#ifdef VBOX_WITH_NATIVE_NEM
                /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
                if (VM_IS_NEM_ENABLED(pVM))
                {
                    uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                    PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                    NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                                   pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                    PGM_PAGE_SET_NEM_STATE(pPage, u2State);
                }
#endif
            }
            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }
        PGM_UNLOCK(pVM);
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
        return VERR_INVALID_PARAMETER;
    }
    PGM_UNLOCK(pVM);

    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
        return VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}

1649
1650#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1651/**
1652 * Resolves an MMIO2 page.
1653 *
1654 * Caller has taken the PGM lock.
1655 *
1656 * @returns Pointer to the page if valid, NULL otherwise
1657 * @param pVM The cross context VM structure.
1658 * @param pDevIns The device owning it.
1659 * @param hMmio2 The MMIO2 region.
1660 * @param offMmio2Page The offset into the region.
1661 */
1662static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
1663{
1664 /* Only works if the handle is in the handle table! */
1665 AssertReturn(hMmio2 != 0, NULL);
1666 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
1667 AssertReturn(hMmio2 <= cMmio2Ranges, NULL);
1668 AssertCompile(RT_ELEMENTS(pVM->pgm.s.apMmio2RamRanges) == RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
1669# ifdef IN_RING0
1670 AssertCompile(RT_ELEMENTS(pVM->pgmr0.s.apMmio2RamRanges) == RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
1671 AssertCompile(RT_ELEMENTS(pVM->pgmr0.s.acMmio2RangePages) == RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
1672# endif
1673 uint32_t const idxFirst = hMmio2 - 1U;
1674
1675 /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
1676 AssertReturn(pVM->pgm.s.aMmio2Ranges[idxFirst].fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
1677# ifdef IN_RING0
1678 AssertReturn(pVM->pgmr0.s.ahMmio2MapObjs[idxFirst] != NIL_RTR0MEMOBJ, NULL); /* Only the first chunk has a backing object. */
1679# endif
1680
1681     /* Loop thru the sub-ranges till we find the one covering offMmio2Page. */
1682 for (uint32_t idx = idxFirst; idx < cMmio2Ranges; idx++)
1683 {
1684# ifdef IN_RING3
1685 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns, NULL);
1686# else
1687 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns->pDevInsForR3, NULL);
1688# endif
1689
1690 /* Does it match the offset? */
1691 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx];
1692 AssertReturn(pRamRange, NULL);
1693# ifdef IN_RING3
1694 RTGCPHYS const cbRange = RT_MIN(pRamRange->cb, pVM->pgm.s.aMmio2Ranges[idx].cbReal);
1695# else
1696 RTGCPHYS const cbRange = RT_MIN(pRamRange->cb, (RTGCPHYS)pVM->pgmr0.s.acMmio2RangePages[idx] << GUEST_PAGE_SHIFT);
1697# endif
1698 if (offMmio2Page < cbRange)
1699 return &pRamRange->aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
1700
1701 /* Advance. */
1702 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
1703 offMmio2Page -= cbRange;
1704 }
1705 AssertFailed();
1706 return NULL;
1707}
1708#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
1709
1710
1711/**
1712 * Replaces an MMIO page with an MMIO2 page.
1713 *
1714 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1715 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1716 * backing, the caller must provide a replacement page. For various reasons the
1717 * replacement page must be an MMIO2 page.
1718 *
1719 * The caller must do the required page table modifications. You can get away
1720 * without making any modifications since it's an MMIO page; the cost is an extra
1721 * \#PF which will then resync the page.
1722 *
1723 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1724 *
1725 * The caller may still get handler callbacks even after this call and must be
1726 * able to deal correctly with such calls. The reason for these callbacks is
1727 * either that we're executing in the recompiler (which doesn't know about this
1728 * arrangement) or that we've been restored from saved state (where we won't
1729 * save the change).
1730 *
1731 * @returns VBox status code.
1732 * @param pVM The cross context VM structure.
1733 * @param GCPhys The start address of the access handler. This
1734 * must be a fully page aligned range or we risk
1735 * messing up other handlers installed for the
1736 * start and end pages.
1737 * @param GCPhysPage The physical address of the page to turn off
1738 * access monitoring for and replace with the MMIO2
1739 * page.
1740 * @param pDevIns The device instance owning @a hMmio2.
1741 * @param hMmio2 Handle to the MMIO2 region containing the page
1742 * to remap into the MMIO page at @a GCPhys.
1743 * @param offMmio2PageRemap The offset into @a hMmio2 of the MMIO2 page that
1744 * should serve as backing memory.
1745 *
1746 * @remark May cause a page pool flush if used on a page that is already
1747 * aliased.
1748 *
1749 * @note This trick only works reliably if the two pages are never ever
1750 * mapped in the same page table. If they are, the page pool code will
1751 * be confused should either of them be flushed. See the special case
1752 * of zero page aliasing mentioned in #3170.
1753 *
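 * @par Example
 * A hedged sketch of the intended IOM-side usage (all identifiers here are
 * hypothetical): alias one page of a device's registered MMIO range onto a
 * page of its MMIO2 backing so further guest accesses bypass the handler.
 * @code
 *      // GCPhysMmio: start of the registered MMIO handler range;
 *      // offPage: page aligned offset of the page to alias;
 *      // hMmio2/offMmio2: the backing MMIO2 page (all hypothetical).
 *      int rc2 = PGMHandlerPhysicalPageAliasMmio2(pVM, GCPhysMmio, GCPhysMmio + offPage,
 *                                                 pDevIns, hMmio2, offMmio2);
 *      if (rc2 == VINF_PGM_HANDLER_ALREADY_ALIASED)
 *          rc2 = VINF_SUCCESS; // The same MMIO2 page was already mapped here.
 *      AssertRC(rc2);
 *      // PGMHandlerPhysicalReset(pVM, GCPhysMmio) restores plain MMIO later.
 * @endcode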
1754 */
1755VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1756 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1757{
1758#ifdef VBOX_WITH_ONLY_PGM_NEM_MODE
1759 RT_NOREF(pVM, GCPhys, GCPhysPage, pDevIns, hMmio2, offMmio2PageRemap);
1760 AssertFailedReturn(VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1761#else
1762# ifdef VBOX_WITH_PGM_NEM_MODE
1763 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1764# endif
1765 int rc = PGM_LOCK(pVM);
1766 AssertRCReturn(rc, rc);
1767
1768 /*
1769 * Resolve the MMIO2 reference.
1770 */
1771 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1772 if (RT_LIKELY(pPageRemap))
1773 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1774 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1775 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1776 else
1777 {
1778 PGM_UNLOCK(pVM);
1779 return VERR_OUT_OF_RANGE;
1780 }
1781
1782 /*
1783 * Lookup and validate the range.
1784 */
1785 PPGMPHYSHANDLER pCur;
1786 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1787 if (RT_SUCCESS(rc))
1788 {
1789 Assert(pCur->Key == GCPhys);
1790 if (RT_LIKELY( GCPhysPage >= pCur->Key
1791 && GCPhysPage <= pCur->KeyLast))
1792 {
1793 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1794 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1795 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1796 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1797 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1798
1799 /*
1800 * Validate the page.
1801 */
1802 PPGMPAGE pPage;
1803 PPGMRAMRANGE pRam;
1804 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1805 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1806 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1807 {
1808 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1809 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1810 VERR_PGM_PHYS_NOT_MMIO2);
1811 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1812 {
1813 PGM_UNLOCK(pVM);
1814 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1815 }
1816
1817 /*
1818              * The page is already mapped as some other page; reset it
1819              * to an MMIO/ZERO page before doing the new mapping.
1820 */
1821 Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1822 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1823 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
1824 false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
1825 pCur->cAliasedPages--;
1826
1827                 /* Since this mapping may be present in the IEM TLBs and now be wrong,
1828                    invalidate the guest physical address part of the IEM TLBs.  Note, we
1829                    do this here as the remapping code below will not invalidate it. */
1830 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MMIO2_ALIAS);
1831 }
1832 Assert(PGM_PAGE_IS_ZERO(pPage));
1833
1834 /*
1835 * Do the actual remapping here.
1836 * This page now serves as an alias for the backing memory specified.
1837 */
1838 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
1839 GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
1840 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1841 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1842 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1843 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1844 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1845 pCur->cAliasedPages++;
1846 Assert(pCur->cAliasedPages <= pCur->cPages);
1847
1848 /*
1849 * Flush its TLB entry.
1850 *
1851 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here to conserve
1852              * all the other IEM TLB entries. When this one is kicked out and
1853 * reloaded, it will be using the MMIO2 alias, but till then we'll
1854 * continue doing MMIO.
1855 */
1856 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1857             /** @todo Do some performance checks of calling
1858              *        IEMTlbInvalidateAllPhysicalAllCpus when in IEM mode, to see if it
1859              *        actually makes sense or not. Screen updates are typically massive
1860              *        and important when this kind of aliasing is used, so it may pay off... */
1861
1862# ifdef VBOX_WITH_NATIVE_NEM
1863 /* Tell NEM about the backing and protection change. */
1864 if (VM_IS_NEM_ENABLED(pVM))
1865 {
1866 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1867 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1868 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1869 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1870 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1871 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1872 }
1873# endif
1874 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
1875 PGM_UNLOCK(pVM);
1876 return VINF_SUCCESS;
1877 }
1878
1879 PGM_UNLOCK(pVM);
1880 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1881 return VERR_INVALID_PARAMETER;
1882 }
1883
1884 PGM_UNLOCK(pVM);
1885 if (rc == VERR_NOT_FOUND)
1886 {
1887 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1888 return VERR_PGM_HANDLER_NOT_FOUND;
1889 }
1890 return rc;
1891#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
1892}
1893
1894
1895/**
1896 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
1897 *
1898 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
1899 * need to be a known MMIO2 page and that only shadow paging may access the
1900 * page. The latter distinction is important because the only use for this
1901 * feature is for mapping the special APIC access page that VT-x uses to detect
1902 * APIC MMIO operations; the page is shared between all guest CPUs and actually
1903 * not written to. At least at the moment.
1904 *
1905 * The caller must do the required page table modifications. You can get away
1906 * without making any modifications since it's an MMIO page; the cost is an extra
1907 * \#PF which will then resync the page.
1908 *
1909 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1910 *
1911 *
1912 * @returns VBox status code.
1913 * @param pVM The cross context VM structure.
1914 * @param GCPhys The start address of the access handler. This
1915 * must be a fully page aligned range or we risk
1916 * messing up other handlers installed for the
1917 * start and end pages.
1918 * @param GCPhysPage The physical address of the page to turn off
1919 * access monitoring for.
1920 * @param HCPhysPageRemap The physical address of the HC page that
1921 * serves as backing memory.
1922 *
1923 * @remark May cause a page pool flush if used on a page that is already
1924 * aliased.
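 *
 * @par Example
 * A hedged sketch of the VT-x APIC access page use mentioned above (the
 * identifiers are hypothetical): back the guest's APIC MMIO page with the
 * shared host page so shadow paging resolves accesses without faulting.
 * @code
 *      // GCPhysApicBase doubles as the handler start and the target page;
 *      // HCPhysApicAccess is the shared host page (both hypothetical).
 *      int rc2 = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicBase, GCPhysApicBase,
 *                                              HCPhysApicAccess);
 *      AssertRC(rc2);
 * @endcode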
1925 */
1926VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1927{
1928#ifdef VBOX_WITH_ONLY_PGM_NEM_MODE
1929 RT_NOREF(pVM, GCPhys, GCPhysPage, HCPhysPageRemap);
1930 AssertFailedReturn(VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1931#else
1932/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1933# ifdef VBOX_WITH_PGM_NEM_MODE
1934 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1935# endif
1936 int rc = PGM_LOCK(pVM);
1937 AssertRCReturn(rc, rc);
1938
1939 /*
1940 * Lookup and validate the range.
1941 */
1942 PPGMPHYSHANDLER pCur;
1943 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1944 if (RT_SUCCESS(rc))
1945 {
1946 Assert(pCur->Key == GCPhys);
1947 if (RT_LIKELY( GCPhysPage >= pCur->Key
1948 && GCPhysPage <= pCur->KeyLast))
1949 {
1950 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1951 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1952 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1953 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1954 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1955
1956 /*
1957 * Get and validate the pages.
1958 */
1959 PPGMPAGE pPage = NULL;
1960# ifdef VBOX_WITH_NATIVE_NEM
1961 PPGMRAMRANGE pRam = NULL;
1962 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1963# else
1964 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1965# endif
1966 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1967 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1968 {
1969 PGM_UNLOCK(pVM);
1970 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
1971 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1972 VERR_PGM_PHYS_NOT_MMIO2);
1973 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1974 }
1975 Assert(PGM_PAGE_IS_ZERO(pPage));
1976
1977 /*
1978 * Do the actual remapping here.
1979 * This page now serves as an alias for the backing memory
1980 * specified as far as shadow paging is concerned.
1981 */
1982 LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
1983 GCPhysPage, pPage, HCPhysPageRemap));
1984 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1985 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
1986 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1987 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1988 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1989 pCur->cAliasedPages++;
1990 Assert(pCur->cAliasedPages <= pCur->cPages);
1991
1992 /*
1993 * Flush its TLB entry.
1994 *
1995 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here as special
1996 * aliased MMIO pages are handled like MMIO by the IEM TLB.
1997 */
1998 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1999
2000# ifdef VBOX_WITH_NATIVE_NEM
2001 /* Tell NEM about the backing and protection change. */
2002 if (VM_IS_NEM_ENABLED(pVM))
2003 {
2004 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
2005 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
2006 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
2007 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
2008 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
2009 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
2010 }
2011# endif
2012 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
2013 PGM_UNLOCK(pVM);
2014 return VINF_SUCCESS;
2015 }
2016 PGM_UNLOCK(pVM);
2017 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
2018 return VERR_INVALID_PARAMETER;
2019 }
2020 PGM_UNLOCK(pVM);
2021
2022 if (rc == VERR_NOT_FOUND)
2023 {
2024 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
2025 return VERR_PGM_HANDLER_NOT_FOUND;
2026 }
2027 return rc;
2028#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
2029}
2030
2031
2032/**
2033 * Checks if a physical range is handled.
2034 *
2035 * @returns boolean
2036 * @param pVM The cross context VM structure.
2037 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
2038 * @remarks Caller must take the PGM lock...
2039 * @thread EMT.
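 *
 * @par Example
 * Illustrative only; the queried address is hypothetical.
 * @code
 *      // Sanity check that some handler covers the given guest address.
 *      Assert(PGMHandlerPhysicalIsRegistered(pVM, GCPhysMmioBase));
 * @endcode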
2040 */
2041VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
2042{
2043 /*
2044 * Find the handler.
2045 */
2046 PGM_LOCK_VOID(pVM);
2047 PPGMPHYSHANDLER pCur;
2048 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2049 if (RT_SUCCESS(rc))
2050 {
2051#ifdef VBOX_STRICT
2052 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2053 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2054 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
2055 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
2056 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
2057#endif
2058 PGM_UNLOCK(pVM);
2059 return true;
2060 }
2061 PGM_UNLOCK(pVM);
2062 return false;
2063}
2064
2065
2066/**
2067 * Checks if it's a disabled all access handler or write access handler at the
2068 * given address.
2069 *
2070 * @returns true if it's an all access handler, false if it's a write access
2071 * handler.
2072 * @param pVM The cross context VM structure.
2073 * @param GCPhys The address of the page with a disabled handler.
2074 *
2075 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
2076 */
2077bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
2078{
2079 PGM_LOCK_VOID(pVM);
2080 PPGMPHYSHANDLER pCur;
2081 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2082 AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), true);
2083
2084 /* Only whole pages can be disabled. */
2085 Assert( pCur->Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
2086 && pCur->KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));
2087
2088 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2089 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
2090 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
2091 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
2092 bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
2093 PGM_UNLOCK(pVM);
2094 return fRet;
2095}
2096
2097#ifdef VBOX_STRICT
2098
2099/**
2100 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
2101 * and its AVL enumerators.
2102 */
2103typedef struct PGMAHAFIS
2104{
2105 /** The current physical address. */
2106 RTGCPHYS GCPhys;
2107 /** Number of errors. */
2108 unsigned cErrors;
2109 /** Pointer to the VM. */
2110 PVM pVM;
2111} PGMAHAFIS, *PPGMAHAFIS;
2112
2113
2114/**
2115 * Asserts that the handlers+guest-page-tables == ramrange-flags and
2116 * that the physical addresses associated with virtual handlers are correct.
2117 *
2118 * @returns Number of mismatches.
2119 * @param pVM The cross context VM structure.
2120 */
2121VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
2122{
2123 PGMAHAFIS State;
2124 State.GCPhys = 0;
2125 State.cErrors = 0;
2126 State.pVM = pVM;
2127
2128 PGM_LOCK_ASSERT_OWNER(pVM);
2129
2130 /*
2131 * Check the RAM flags against the handlers.
2132 */
2133 PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
2134 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries,
2135 RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
2136 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
2137 {
2138 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
2139 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
2140 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
2141 AssertContinue(pRam);
2142 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
2143 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2144 {
2145 PGMPAGE const *pPage = &pRam->aPages[iPage];
2146 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
2147 {
2148 State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);
2149
2150 /*
2151 * Physical first - calculate the state based on the handlers
2152 * active on the page, then compare.
2153 */
2154 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
2155 {
2156 /* the first */
2157 PPGMPHYSHANDLER pPhys;
2158 int rc = pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, State.GCPhys, &pPhys);
2159 if (rc == VERR_NOT_FOUND)
2160 {
2161 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2162 State.GCPhys, &pPhys);
2163 if (RT_SUCCESS(rc))
2164 {
2165 Assert(pPhys->Key >= State.GCPhys);
2166 if (pPhys->Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
2167 pPhys = NULL;
2168 }
2169 else
2170 AssertLogRelMsgReturn(rc == VERR_NOT_FOUND, ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
2171 }
2172 else
2173 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
2174
2175 if (pPhys)
2176 {
2177 PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
2178 unsigned uState = pPhysType->uState;
2179 bool const fNotInHm = pPhysType->fNotInHm; /* whole pages, so no need to accumulate sub-page configs. */
2180
2181 /* more? */
2182 while (pPhys->KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
2183 {
2184 PPGMPHYSHANDLER pPhys2;
2185 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2186 pPhys->KeyLast + 1, &pPhys2);
2187 if (rc == VERR_NOT_FOUND)
2188 break;
2189 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc KeyLast+1=%RGp\n", rc, pPhys->KeyLast + 1), 999);
2190 if (pPhys2->Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
2191 break;
2192 PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
2193 uState = RT_MAX(uState, pPhysType2->uState);
2194 pPhys = pPhys2;
2195 }
2196
2197                     /* compare. */
2198 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
2199 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
2200 {
2201 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
2202 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
2203 State.cErrors++;
2204 }
2205 AssertMsgStmt(PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) == fNotInHm,
2206 ("ram range vs phys handler flags mismatch. GCPhys=%RGp fNotInHm=%d, %d %s\n",
2207 State.GCPhys, PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage), fNotInHm, pPhysType->pszDesc),
2208 State.cErrors++);
2209 }
2210 else
2211 {
2212 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
2213 State.cErrors++;
2214 }
2215 }
2216 }
2217 } /* foreach page in ram range. */
2218 } /* foreach ram range. */
2219
2220 /*
2221 * Do the reverse check for physical handlers.
2222 */
2223 /** @todo */
2224
2225 return State.cErrors;
2226}
2227
2228#endif /* VBOX_STRICT */
2229