VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@ 6862

Last change on this file since 6862 was 6862, checked in by vboxsync, 17 years ago

Removed the PGMPHYSHANDLERTYPE_PHYSICAL value, as it was never used and never will be.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 36.7 KB
Line 
1/* $Id: PGMAllHandler.cpp 6862 2008-02-08 10:38:38Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/dbgf.h>
24#include <VBox/pgm.h>
25#include <VBox/iom.h>
26#include <VBox/mm.h>
27#include <VBox/em.h>
28#include <VBox/stam.h>
29#include <VBox/rem.h>
30#include <VBox/dbgf.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/string.h>
39#include <VBox/param.h>
40#include <VBox/err.h>
41#include <VBox/selm.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
48static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
49static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
50
51
52
/**
 * Registers an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not all be updated
 *          because the guest page is aliased and/or mapped by multiple PTs. A CR3
 *          sync has been flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
 *          one. A debug assertion is raised.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pvUserR3        User argument to the R3 handler.
 * @param   pfnHandlerR0    The R0 handler.
 * @param   pvUserR0        User argument to the R0 handler.
 * @param   pfnHandlerGC    The GC handler.
 * @param   pvUserGC        User argument to the GC handler.
 *                          This must be a GC pointer because it will be relocated!
 * @param   pszDesc         Pointer to description string. This must not be freed.
 */
PGMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                          R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                          R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                          GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
                                          R3PTRTYPE(const char *) pszDesc)
{
    Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%VGp GCPhysLast=%VGp pfnHandlerR3=%VHv pvUserR3=%VHv pfnHandlerR0=%VHv pvUserR0=%VHv pfnHandlerGC=%VGv pvUserGC=%VGv pszDesc=%s\n",
         enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerGC, pvUserGC, HCSTRING(pszDesc)));

    /*
     * Validate input.
     */
    if (GCPhys >= GCPhysLast)
    {
        AssertMsgFailed(("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast));
        return VERR_INVALID_PARAMETER;
    }
    switch (enmType)
    {
        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            break;
        default:
            AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
            return VERR_INVALID_PARAMETER;
    }
    /* pvUserGC must be a GC address that round-trips through the hypervisor
       mappings; small values (< 64KB) are treated as plain cookies and allowed. */
    if (    (RTGCUINTPTR)pvUserGC >= 0x10000
        &&  MMHyperHC2GC(pVM, MMHyperGC2HC(pVM, pvUserGC)) != pvUserGC)
    {
        AssertMsgFailed(("Not GC pointer! pvUserGC=%VGv\n", pvUserGC));
        return VERR_INVALID_PARAMETER;
    }
    AssertReturn(pfnHandlerR3 || pfnHandlerR0 || pfnHandlerGC, VERR_INVALID_PARAMETER);

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
    while (pRam && GCPhys > pRam->GCPhysLast)
        pRam = CTXALLSUFF(pRam->pNext);
    if (    !pRam
        ||  GCPhysLast < pRam->GCPhys
        ||  GCPhys > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        /*
         * If this is an MMIO registration, we'll just add a range for it.
         * (Registering new RAM ranges is only possible from ring-3.)
         */
        if (    enmType == PGMPHYSHANDLERTYPE_MMIO
            &&  (   !pRam
                 || GCPhysLast < pRam->GCPhys)
           )
        {
            size_t cb = GCPhysLast - GCPhys + 1;
            Assert(cb == RT_ALIGN_Z(cb, PAGE_SIZE));
            int rc = PGMR3PhysRegister(pVM, NULL, GCPhys, cb, MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO, NULL, pszDesc);
            if (VBOX_FAILURE(rc))
                return rc;

            /* search again. */
            pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
            while (pRam && GCPhys > pRam->GCPhysLast)
                pRam = CTXALLSUFF(pRam->pNext);
        }

        if (    !pRam
            ||  GCPhysLast < pRam->GCPhys
            ||  GCPhys > pRam->GCPhysLast)
#endif /* IN_RING3 */
        {
#ifdef IN_RING3
            DBGFR3Info(pVM, "phys", NULL, NULL);
#endif
            AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
            return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
        }
    }

    /*
     * Allocate and initialize the new entry.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    pNew->Core.Key      = GCPhys;
    pNew->Core.KeyLast  = GCPhysLast;
    pNew->enmType       = enmType;
    pNew->cPages        = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
    pNew->pfnHandlerR3  = pfnHandlerR3;
    pNew->pvUserR3      = pvUserR3;
    pNew->pfnHandlerR0  = pfnHandlerR0;
    pNew->pvUserR0      = pvUserR0;
    pNew->pfnHandlerGC  = pfnHandlerGC;
    pNew->pvUserGC      = pvUserGC;
    pNew->pszDesc       = pszDesc;

    pgmLock(pVM);

    /*
     * Try insert into list.
     */
    if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
    {
        rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
        if (rc == VINF_PGM_GCPHYS_ALIASED)
        {
            /* Couldn't flush precisely; schedule a full pool clear + CR3 resync. */
            pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
        }
        pVM->pgm.s.fPhysCacheFlushPending = true;
#ifndef IN_RING3
        REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#else
        REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#endif
        pgmUnlock(pVM);
        if (rc != VINF_SUCCESS)
            Log(("PGMHandlerPhysicalRegisterEx: returns %Vrc (%VGp-%VGp)\n", rc, GCPhys, GCPhysLast));
        return rc;
    }
    pgmUnlock(pVM);

    /* Insertion failed: the range overlaps an existing handler. */
#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
    MMHyperFree(pVM, pNew);
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}
210
211
/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not all be updated
 *          because the guest page is aliased and/or mapped by multiple PTs.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 * @param   pRam    The RAM range which contains the handler range.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool fFlushTLBs = false;
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE)
    int rc = VINF_SUCCESS;
#else
    /* Without pool tracking we cannot flush individual shadow PTs,
       so always ask the caller for a full resync. */
    const int rc = VINF_PGM_GCPHYS_ALIASED;
#endif
    const unsigned fFlags = pgmHandlerPhysicalCalcFlags(pCur); Assert(!(fFlags & X86_PTE_PAE_PG_MASK));
    RTUINT cPages = pCur->cPages;
    RTUINT i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
    for (;;)
    {
        /* Physical chunk in dynamically allocated range not present? */
        if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(&pRam->aPages[i])))
        {
            RTGCPHYS GCPhys = pRam->GCPhys + (i << PAGE_SHIFT);
#ifdef IN_RING3
            int rc2 = pgmr3PhysGrowRange(pVM, GCPhys);
#else
            /* Not in ring-3: punt the allocation to the host via the call-host interface. */
            int rc2 = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
            if (rc2 != VINF_SUCCESS)
                return rc2;
        }

        /* Only touch pages which don't have all the handler flags set yet. */
        if ((pRam->aPages[i].HCPhys & fFlags) != fFlags) /** @todo PAGE FLAGS */
        {
            pRam->aPages[i].HCPhys |= fFlags; /** @todo PAGE FLAGS */

            Assert(PGM_PAGE_GET_HCPHYS(&pRam->aPages[i]));

#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
            /* This code also makes ASSUMPTIONS about the cRefs and stuff. */
            Assert(MM_RAM_FLAGS_IDX_SHIFT < MM_RAM_FLAGS_CREFS_SHIFT);
            const uint16_t u16 = pRam->aPages[i].HCPhys >> MM_RAM_FLAGS_IDX_SHIFT; /** @todo PAGE FLAGS */
            if (u16)
            {
                /* Dispatch on the cRefs field: single PT, several PTs, or overflowed
                   tracking info which requires the slow full scan. */
                if ((u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) != MM_RAM_FLAGS_CREFS_PHYSEXT)
                    pgmPoolTrackFlushGCPhysPT(pVM,
                                              &pRam->aPages[i],
                                              u16 & MM_RAM_FLAGS_IDX_MASK,
                                              u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
                else if (u16 != ((MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) | MM_RAM_FLAGS_IDX_OVERFLOWED))
                    pgmPoolTrackFlushGCPhysPTs(pVM, &pRam->aPages[i], u16 & MM_RAM_FLAGS_IDX_MASK);
                else
                    rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, &pRam->aPages[i]);
                fFlushTLBs = true;
            }
#elif defined(PGMPOOL_WITH_CACHE)
            rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, &pRam->aPages[i]);
            fFlushTLBs = true;
#endif
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
    }

    if (fFlushTLBs && rc == VINF_SUCCESS)
    {
        PGM_INVL_GUEST_TLBS();
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Vrc\n", rc));
    return rc;
}
296
297
/**
 * Deregisters a physical page access handler.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if no handler range starts at GCPhys.
 * @param   pVM     VM Handle.
 * @param   GCPhys  Start physical address of the registered range.
 */
PGMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    pgmLock(pVM);
    /* RTAvlroGCPhysRemove both looks the node up by its start key and unlinks it. */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %#VGp-%#VGp %s\n",
                 pCur->Core.Key, pCur->Core.KeyLast, HCSTRING(pCur->pszDesc)));

        /*
         * Clear the page bits and notify the REM about this change.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
        pgmUnlock(pVM);
        MMHyperFree(pVM, pCur);
        return VINF_SUCCESS;
    }
    pgmUnlock(pVM);

    AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
331
332
333/**
334 * Shared code with modify.
335 */
336static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
337{
338 RTGCPHYS GCPhysStart = pCur->Core.Key;
339 RTGCPHYS GCPhysLast = pCur->Core.KeyLast;
340
341 /*
342 * Page align the range.
343 */
344 if ( (pCur->Core.Key & PAGE_OFFSET_MASK)
345 || ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
346 {
347 if (GCPhysStart & PAGE_OFFSET_MASK)
348 {
349 if (pgmRamTestFlags(&pVM->pgm.s, GCPhysStart, MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_TEMP_OFF))
350 {
351 RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
352 if ( GCPhys > GCPhysLast
353 || GCPhys < GCPhysStart)
354 return;
355 GCPhysStart = GCPhys;
356 }
357 else
358 GCPhysStart = GCPhysStart & X86_PTE_PAE_PG_MASK;
359 }
360 if (GCPhysLast & PAGE_OFFSET_MASK)
361 {
362 if (pgmRamTestFlags(&pVM->pgm.s, GCPhysLast, MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_TEMP_OFF))
363 {
364 RTGCPHYS GCPhys = (GCPhysStart & X86_PTE_PAE_PG_MASK) - 1;
365 if ( GCPhys < GCPhysStart
366 || GCPhys > GCPhysLast)
367 return;
368 GCPhysLast = GCPhys;
369 }
370 else
371 GCPhysLast += PAGE_SIZE - 1 - (GCPhysLast & PAGE_OFFSET_MASK);
372 }
373 }
374
375 /*
376 * Tell REM.
377 */
378 const bool fRestoreAsRAM = pCur->pfnHandlerR3
379 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
380#ifndef IN_RING3
381 REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
382#else
383 REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
384#endif
385}
386
387
/**
 * Resets ram range flags.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when shadow PTs were successfully updated.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 *
 * @remark  We don't start messing with the shadow page tables, as we've already got code
 *          in Trap0e which deals with out of sync handler flags (originally conceived for
 *          global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    RTUINT          cPages   = pCur->cPages;
    RTGCPHYS        GCPhys   = pCur->Core.Key;
    PPGMRAMRANGE    pRamHint = NULL;
    PPGM            pPGM     = &pVM->pgm.s;
    for (;;)
    {
        pgmRamFlagsClearByGCPhysWithHint(pPGM, GCPhys,
                                         MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL,
                                         &pRamHint);
        /* next */
        if (--cPages == 0)
            break;
        GCPhys += PAGE_SIZE;
    }

    /*
     * Check for partial start page: if another handler ends inside our first
     * page, that page must keep its handler flags.
     */
    if (pCur->Core.Key & PAGE_OFFSET_MASK)
    {
        RTGCPHYS GCPhys = pCur->Core.Key - 1;
        for (;;)
        {
            PPGMPHYSHANDLER pBelow = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys, false);
            if (    !pBelow
                ||  (pBelow->Core.KeyLast >> PAGE_SHIFT) != (pCur->Core.Key >> PAGE_SHIFT))
                break;
            /* NOTE(review): flags are recomputed from pCur, not from the
               neighbouring handler pBelow which actually covers the page —
               looks suspicious; verify the intent before changing. */
            pgmRamFlagsSetByGCPhysWithHint(pPGM, GCPhys, pgmHandlerPhysicalCalcFlags(pCur), &pRamHint);

            /* next? */
            if (    (pBelow->Core.Key >> PAGE_SHIFT) != (pCur->Core.Key >> PAGE_SHIFT)
                ||  !(pBelow->Core.Key & PAGE_OFFSET_MASK))
                break;
            GCPhys = pBelow->Core.Key - 1;
        }
    }

    /*
     * Check for partial end page: same story for a handler starting inside our
     * last page.
     */
    if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
    {
        RTGCPHYS GCPhys = pCur->Core.KeyLast + 1;
        for (;;)
        {
            PPGMPHYSHANDLER pAbove = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys, true);
            if (    !pAbove
                ||  (pAbove->Core.Key >> PAGE_SHIFT) != (pCur->Core.KeyLast >> PAGE_SHIFT))
                break;
            /* NOTE(review): same pCur-vs-pAbove question as above. */
            pgmRamFlagsSetByGCPhysWithHint(pPGM, GCPhys, pgmHandlerPhysicalCalcFlags(pCur), &pRamHint);

            /* next? */
            if (    (pAbove->Core.KeyLast >> PAGE_SHIFT) != (pCur->Core.KeyLast >> PAGE_SHIFT)
                ||  (pAbove->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_SIZE - 1)
                break;
            GCPhys = pAbove->Core.KeyLast + 1;
        }
    }
}
465
466
467/**
468 * Modify a physical page access handler.
469 *
470 * Modification can only be done to the range it self, not the type or anything else.
471 *
472 * @returns VBox status code.
473 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
474 * and a new registration must be performed!
475 * @param pVM VM handle.
476 * @param GCPhysCurrent Current location.
477 * @param GCPhys New location.
478 * @param GCPhysLast New last location.
479 */
480PGMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
481{
482 /*
483 * Remove it.
484 */
485 int rc;
486 pgmLock(pVM);
487 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhysCurrent);
488 if (pCur)
489 {
490 /*
491 * Clear the ram flags. (We're gonna move or free it!)
492 */
493 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
494 const bool fRestoreAsRAM = pCur->pfnHandlerR3
495 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
496
497 /*
498 * Validate the new range, modify and reinsert.
499 */
500 if (GCPhysLast >= GCPhys)
501 {
502 /*
503 * We require the range to be within registered ram.
504 * There is no apparent need to support ranges which cover more than one ram range.
505 */
506 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
507 while (pRam && GCPhys > pRam->GCPhysLast)
508 pRam = CTXALLSUFF(pRam->pNext);
509 if ( pRam
510 && GCPhys <= pRam->GCPhysLast
511 && GCPhysLast >= pRam->GCPhys)
512 {
513 pCur->Core.Key = GCPhys;
514 pCur->Core.KeyLast = GCPhysLast;
515 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> PAGE_SHIFT;
516
517 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pCur->Core))
518 {
519 /*
520 * Set ram flags, flush shadow PT entries and finally tell REM about this.
521 */
522 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
523 if (rc == VINF_PGM_GCPHYS_ALIASED)
524 {
525 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
526 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
527 }
528 pVM->pgm.s.fPhysCacheFlushPending = true;
529
530#ifndef IN_RING3
531 REMNotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
532 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
533#else
534 REMR3NotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
535 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
536#endif
537 pgmUnlock(pVM);
538 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%VGp -> GCPhys=%VGp GCPhysLast=%VGp\n",
539 GCPhysCurrent, GCPhys, GCPhysLast));
540 return VINF_SUCCESS;
541 }
542
543 AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp\n", GCPhys, GCPhysLast));
544 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
545 }
546 else
547 {
548 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
549 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
550 }
551 }
552 else
553 {
554 AssertMsgFailed(("Invalid range %VGp-%VGp\n", GCPhys, GCPhysLast));
555 rc = VERR_INVALID_PARAMETER;
556 }
557
558 /*
559 * Invalid new location, free it.
560 * We've only gotta notify REM and free the memory.
561 */
562 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
563 MMHyperFree(pVM, pCur);
564 }
565 else
566 {
567 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhysCurrent));
568 rc = VERR_PGM_HANDLER_NOT_FOUND;
569 }
570
571 pgmUnlock(pVM);
572 return rc;
573}
574
575
576/**
577 * Changes the callbacks associated with a physical access handler.
578 *
579 * @returns VBox status code.
580 * @param pVM VM Handle.
581 * @param GCPhys Start physical address.
582 * @param pfnHandlerR3 The R3 handler.
583 * @param pvUserR3 User argument to the R3 handler.
584 * @param pfnHandlerR0 The R0 handler.
585 * @param pvUserR0 User argument to the R0 handler.
586 * @param pfnHandlerGC The GC handler.
587 * @param pvUserGC User argument to the GC handler.
588 * This must be a GC pointer because it will be relocated!
589 * @param pszDesc Pointer to description string. This must not be freed.
590 */
591PGMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
592 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
593 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
594 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
595 R3PTRTYPE(const char *) pszDesc)
596{
597 /*
598 * Get the handler.
599 */
600 int rc = VINF_SUCCESS;
601 pgmLock(pVM);
602 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
603 if (pCur)
604 {
605 /*
606 * Change callbacks.
607 */
608 pCur->pfnHandlerR3 = pfnHandlerR3;
609 pCur->pvUserR3 = pvUserR3;
610 pCur->pfnHandlerR0 = pfnHandlerR0;
611 pCur->pvUserR0 = pvUserR0;
612 pCur->pfnHandlerGC = pfnHandlerGC;
613 pCur->pvUserGC = pvUserGC;
614 pCur->pszDesc = pszDesc;
615 }
616 else
617 {
618 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
619 rc = VERR_PGM_HANDLER_NOT_FOUND;
620 }
621
622 pgmUnlock(pVM);
623 return rc;
624}
625
626
/**
 * Splits a physical access handler in two.
 *
 * The original handler keeps [GCPhys, GCPhysSplit - 1] and a new node is
 * inserted for [GCPhysSplit, old KeyLast]; both inherit the callbacks.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   GCPhys          Start physical address of the handler.
 * @param   GCPhysSplit     The split address (becomes the start of the 2nd half).
 */
PGMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (GCPhysSplit <= pCur->Core.KeyLast)
        {
            /*
             * Create new handler node for the 2nd half.
             */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %VGp-%VGp and %VGp-%VGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                pgmUnlock(pVM);
                return VINF_SUCCESS;
            }
            /* Insert can only fail on key collision, which shouldn't be possible here. */
            AssertMsgFailed(("whu?\n"));
            rc = VERR_INTERNAL_ERROR;
        }
        else
        {
            AssertMsgFailed(("outside range: %VGp-%VGp split %VGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    /* All failure paths end up here; the preallocated node is released again. */
    MMHyperFree(pVM, pNew);
    return rc;
}
691
692
/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * The first handler absorbs the range of the second; the second node is removed
 * from the tree and freed.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   GCPhys1     Start physical address of the first handler.
 * @param   GCPhys2     Start physical address of the second handler.
 */
PGMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys1);
    if (pCur1)
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
        if (pCur2)
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (pCur1->Core.KeyLast + 1 == pCur2->Core.Key)
            {
                if (    pCur1->pfnHandlerGC == pCur2->pfnHandlerGC
                    &&  pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
                    &&  pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3)
                {
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (pCur3 == pCur2)
                    {
                        pCur1->Core.KeyLast  = pCur2->Core.KeyLast;
                        pCur1->cPages        = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pgmUnlock(pVM);
                        MMHyperFree(pVM, pCur2);
                        return VINF_SUCCESS;
                    }
                    /* Only reached when Remove returned something other than pCur2,
                       so this assertion fires by construction - it flags tree corruption. */
                    Assert(pCur3 == pCur2);
                    rc = VERR_INTERNAL_ERROR;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    return rc;

}
765
766
/**
 * Resets any modifications to individual pages in a physical
 * page access handler region.
 *
 * This is used in pair with PGMHandlerPhysicalModify().
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_INTERNAL_ERROR for MMIO handlers (cannot be reset) or corrupt types.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if no handler range starts at GCPhys.
 * @param   pVM     VM Handle
 * @param   GCPhys  Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 */
PGMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        /*
         * Validate type.
         */
        switch (pCur->enmType)
        {
            case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            {
                /*
                 * Set the flags and flush shadow PT entries.
                 */
                STAM_COUNTER_INC(&pVM->pgm.s.StatHandlePhysicalReset);
                /* Locate the RAM range containing the handler start. */
                PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
                while (pRam && GCPhys > pRam->GCPhysLast)
                    pRam = CTXALLSUFF(pRam->pNext);
                int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
                if (rc == VINF_PGM_GCPHYS_ALIASED)
                {
                    /* Couldn't flush precisely; schedule a full pool clear + CR3 resync. */
                    pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
                    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                }
                pVM->pgm.s.fPhysCacheFlushPending = true;
                pgmUnlock(pVM);
                return VINF_SUCCESS;
            }

            /*
             * Invalid.
             */
            case PGMPHYSHANDLERTYPE_MMIO:
                AssertMsgFailed(("Can't reset type %d!\n",  pCur->enmType));
                pgmUnlock(pVM);
                return VERR_INTERNAL_ERROR;

            default:
                AssertMsgFailed(("Invalid type %d! Corruption!\n",  pCur->enmType));
                pgmUnlock(pVM);
                return VERR_INTERNAL_ERROR;
        }
    }
    pgmUnlock(pVM);
    AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
830
831
832/**
833 * Search for virtual handler with matching physical address
834 *
835 * @returns VBox status code
836 * @param pVM The VM handle.
837 * @param GCPhys GC physical address to search for.
838 * @param ppVirt Where to store the pointer to the virtual handler structure.
839 * @param piPage Where to store the pointer to the index of the cached physical page.
840 */
841int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
842{
843 STAM_PROFILE_START(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
844 Assert(ppVirt);
845
846 PPGMPHYS2VIRTHANDLER pCur;
847 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->PhysToVirtHandlers, GCPhys);
848 if (pCur)
849 {
850 /* found a match! */
851#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
852 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
853#endif
854 *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
855 *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];
856
857 LogFlow(("PHYS2VIRT: found match for %VGp -> %VGv *piPage=%#x\n",
858 GCPhys, (*ppVirt)->GCPtr, *piPage));
859 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
860 return VINF_SUCCESS;
861 }
862
863 *ppVirt = NULL;
864 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
865 return VERR_PGM_HANDLER_NOT_FOUND;
866}
867
868
/**
 * Deal with aliases in phys2virt.
 *
 * Called when inserting a phys-to-virt node into the tree failed because another
 * node already owns the key; the new node is then chained onto the existing head
 * via the self-relative offNextAlias links.
 *
 * @param   pVM         The VM handle.
 * @param   pPhys2Virt  The node we failed to insert.
 */
static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
{
    /*
     * First find the node which is conflicting with us.
     */
    /** @todo Deal with partial overlapping. (Unlikly situation, so I'm too lazy to do anything about it now.) */
    PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
    if (!pHead)
    {
        /* Insert failed but no node with our exact key exists - must be a partial
           overlap we can't handle; leave the node unchained. */
        /** @todo do something clever here... */
#ifdef IN_RING3
        LogRel(("pgmHandlerVirtualInsertAliased: %VGp-%VGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
#endif
        pPhys2Virt->offNextAlias = 0;
        return;
    }
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pHead != pPhys2Virt, ("%VGp-%VGp offVirtHandler=%#RX32\n",
                     pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
#endif

    /** @todo check if the current head node covers the ground we do. This is highly unlikely
     * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */

    /*
     * Insert ourselves as the next node.
     * (offNextAlias is a byte offset relative to the node itself, with flag bits
     * in the low part masked off by PGMPHYS2VIRTHANDLER_OFF_MASK.)
     */
    if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
        pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
    else
    {
        PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
        pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
                                 | PGMPHYS2VIRTHANDLER_IN_TREE;
    }
    pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
                        | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
    Log(("pgmHandlerVirtualInsertAliased: %VGp-%VGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
}
914
915
/**
 * Resets one virtual handler range.
 *
 * Re-applies the RAM-range flags for every physical page cached by the handler
 * and re-inserts pages into the phys-to-virt lookup tree where needed.
 * Intended as an AVL enumeration callback.
 *
 * @returns 0 (continue enumeration)
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  The VM handle.
 */
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
    PVM             pVM  = (PVM)pvUser;

    /*
     * Calc flags.
     */
    unsigned fFlags;
    switch (pCur->enmType)
    {
        case PGMVIRTHANDLERTYPE_EIP:
        case PGMVIRTHANDLERTYPE_NORMAL:     fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER; break;
        case PGMVIRTHANDLERTYPE_WRITE:      fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE; break;
        case PGMVIRTHANDLERTYPE_ALL:        fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_ALL; break;
        /* hypervisor handlers need no flags and wouldn't have anywhere to put them in any case. */
        case PGMVIRTHANDLERTYPE_HYPERVISOR:
            return 0;
        default:
            AssertMsgFailed(("Invalid type %d\n", pCur->enmType));
            return 0;
    }

    /*
     * Iterate the pages and apply the flags.
     */
    PPGMRAMRANGE pRamHint = NULL;
    RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->GCPtr & PAGE_OFFSET_MASK);
    RTGCUINTPTR cbLeft  = pCur->cb;
    for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
    {
        PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
        /* NIL Key means the GC->phys mapping for this page isn't cached yet; skip it. */
        if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
        {
            /* Update the flags. */
            int rc = pgmRamFlagsSetByGCPhysWithHint(&pVM->pgm.s, pPhys2Virt->Core.Key, fFlags, &pRamHint);
            AssertRC(rc);

            /* Need to insert the page in the Phys2Virt lookup tree? */
            if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertRelease(!pPhys2Virt->offNextAlias);
#endif
                /* Compute how much of this page the handler actually covers
                   (may be less than a full page for the last page). */
                unsigned cbPhys = cbLeft;
                if (cbPhys > PAGE_SIZE - offPage)
                    cbPhys = PAGE_SIZE - offPage;
                else
                    Assert(iPage == pCur->cPages - 1);
                pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
                pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
                if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
                    pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                else
                    AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
                                     ("%VGp-%VGp offNextAlias=%#RX32\n",
                                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
#endif
                Log2(("PHYS2VIRT: Insert physical range %VGp-%VGp offNextAlias=%#RX32 %s\n",
                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
            }
        }
        cbLeft -= PAGE_SIZE - offPage;
        offPage = 0;
    }

    return 0;
}
992
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette