VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@22029

Last change on this file since 22029 was 20374, checked in by vboxsync, 16 years ago

*: s/RT_\(BEGIN|END\)_DECLS/RT_C_DECLS_\1/g

/* $Id: PGMAllGst.h 20374 2009-06-08 00:43:21Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
PGM_GST_DECL(int,  GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int,  ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int,  GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE);
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
RT_C_DECLS_END


/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need to distinguish between big and normal pages arises at
 * a later point, a PGMGstGetPageEx() variant will be created for that purpose.
 *
 * @returns VBox status code.
 * @param   pVCpu       The VMCPU handle.
 * @param   GCPtr       Guest Context virtual address of the page. Page aligned!
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
PGM_GST_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
    /*
     * Fake it.
     */
    if (pfFlags)
        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    if (pGCPhys)
        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    /*
     * Get the PDE.
     */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    X86PDE    Pde = pgmGstGet32bitPDE(&pVCpu->pgm.s, GCPtr);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
    /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present.
     * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx). */
    X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);
    bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVCpu) & MSR_K6_EFER_NXE);

# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    PX86PML4E pPml4e;
    X86PDPE   Pdpe;
    X86PDEPAE Pde = pgmGstGetLongModePDEEx(&pVCpu->pgm.s, GCPtr, &pPml4e, &Pdpe);
    bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVCpu) & MSR_K6_EFER_NXE);

    Assert(pPml4e);
    if (!(pPml4e->n.u1Present & Pdpe.n.u1Present))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Merge the accessed, write, user and no-execute bits into the PDE. The
       accessed, write and user bits must be granted at every level (bitwise
       AND), while no-execute set at any level makes the page no-execute
       (bitwise OR). */
    Pde.n.u1Accessed  &= pPml4e->n.u1Accessed  & Pdpe.lm.u1Accessed;
    Pde.n.u1Write     &= pPml4e->n.u1Write     & Pdpe.lm.u1Write;
    Pde.n.u1User      &= pPml4e->n.u1User      & Pdpe.lm.u1User;
    Pde.n.u1NoExecute |= pPml4e->n.u1NoExecute | Pdpe.lm.u1NoExecute;
# endif

    /*
     * Lookup the page.
     */
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    if (   !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
        || !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE)
# endif
       )
    {
        PGSTPT pPT;
        int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc))
            return rc;

        /*
         * Get PT entry and check presence.
         */
        const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        if (!Pte.n.u1Present)
            return VERR_PAGE_NOT_PRESENT;

        /*
         * Store the result.
         * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
         * where the PDPE is simplified.
         */
        if (pfFlags)
        {
            *pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
                     & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
            /* The NX bit is determined by a bitwise OR between the PT and PD. */
            if (fNoExecuteBitValid)
                *pfFlags |= ((Pte.u | Pde.u) & X86_PTE_PAE_NX);
# endif
        }
        if (pGCPhys)
            *pGCPhys = Pte.u & GST_PTE_PG_MASK;
    }
    else
    {
        /*
         * Map big to 4k PTE and store the result.
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
                     | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
            /* For a big page there is no PT level; the NX bit comes from the PD entry alone. */
            if (fNoExecuteBitValid)
                *pfFlags |= (Pde.u & X86_PTE_PAE_NX);
# endif
        }
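        /* Note on the masking below: (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK)
           selects the address bits between the 4KB page offset and the big-page
           offset, i.e. which 4KB page within the big page GCPtr refers to, so
           the result stays page aligned. */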
        if (pGCPhys)
            *pGCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK));
    }
    return VINF_SUCCESS;
#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}
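

/*
 * Usage sketch (illustrative only, not part of the original file): assumes a
 * pVCpu and a page-aligned GCPtr in scope, in one of the paged guest modes so
 * that PGM_GST_NAME() resolves to this GetPage instantiation.
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGM_GST_NAME(GetPage)(pVCpu, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         Log(("%RGv is writable and maps to %RGp\n", GCPtr, GCPhys));
 */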


/**
 * Modifies page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The VMCPU handle.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to. Page aligned!
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    for (;;)
    {
        /*
         * Get the PD entry.
         */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        PX86PDE pPde = pgmGstGet32bitPDEPtr(&pVCpu->pgm.s, GCPtr);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
        /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present.
         * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx). */
        PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVCpu->pgm.s, GCPtr);
        Assert(pPde);
        if (!pPde)
            return VERR_PAGE_TABLE_NOT_PRESENT;
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
        /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
        PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVCpu->pgm.s, GCPtr);
        Assert(pPde);
        if (!pPde)
            return VERR_PAGE_TABLE_NOT_PRESENT;
# endif
        GSTPDE Pde = *pPde;
        Assert(Pde.n.u1Present);
        if (!Pde.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        if (   !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
            || !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE)
# endif
           )
        {
            /*
             * 4KB Page table
             *
             * Walk page tables and pages till we're done.
             */
            PGSTPT pPT;
            int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
            if (RT_FAILURE(rc))
                return rc;

            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(pPT->a))
            {
                GSTPTE Pte = pPT->a[iPTE];
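                /* Note: the physical address bits can never be modified this
                   way; they are kept by the AND (fMask | X86_PTE_PAE_PG_MASK)
                   and filtered out of the OR (fFlags & ~GST_PTE_PG_MASK). */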
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                pPT->a[iPTE] = Pte;

                /* next page */
                cb -= PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 4MB Page table
             */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
                  | (fFlags & ~GST_PTE_PG_MASK)
                  | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *pPde = Pde;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    return VINF_SUCCESS;
#endif
}
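

/*
 * Illustrative sketch (an assumption, not part of the original file): write
 * protecting a single page while leaving every other flag alone. As the loop
 * above shows, the new entry is (old & fMask) | fFlags, with the physical
 * address bits always preserved:
 *
 *     int rc = PGM_GST_NAME(ModifyPage)(pVCpu, GCPtr, PAGE_SIZE,
 *                                       0,                       // fFlags: set nothing
 *                                       ~(uint64_t)X86_PTE_RW);  // fMask: clear only R/W
 */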


/**
 * Retrieves the guest PDE information.
 *
 * @returns VBox status code.
 * @param   pVCpu       The VMCPU handle.
 * @param   GCPtr       Guest context pointer.
 * @param   pPDE        Pointer to the guest PDE structure.
 */
PGM_GST_DECL(int, GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

# if PGM_GST_TYPE == PGM_TYPE_32BIT
    X86PDE    Pde = pgmGstGet32bitPDE(&pVCpu->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_PAE
    X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    X86PDEPAE Pde = pgmGstGetLongModePDE(&pVCpu->pgm.s, GCPtr);
# endif

    pPDE->u = (X86PGPAEUINT)Pde.u;
    return VINF_SUCCESS;
#else
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
#endif
}
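

/*
 * Illustrative calling pattern (assumed, not from the original file):
 *
 *     X86PDEPAE Pde;
 *     if (   RT_SUCCESS(PGM_GST_NAME(GetPDE)(pVCpu, GCPtr, &Pde))
 *         && Pde.n.u1Present)
 *         Log(("PDE for %RGv: %#RX64\n", GCPtr, Pde.u));
 */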


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
/**
 * Updates one virtual handler range.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to a PGMHVUSTATE structure (see PGM.cpp).
 */
static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
    PPGMHVUSTATE    pState = (PPGMHVUSTATE)pvUser;
    PVM             pVM = pState->pVM;
    PVMCPU          pVCpu = pState->pVCpu;
    Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);

#if PGM_GST_TYPE == PGM_TYPE_32BIT
    PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s);
#endif

    RTGCPTR GCPtr = pCur->Core.Key;
#if PGM_GST_TYPE != PGM_TYPE_AMD64
    /* skip all stuff above 4GB if not AMD64 mode. */
    if (GCPtr >= _4GB)
        return 0;
#endif

    unsigned offPage = GCPtr & PAGE_OFFSET_MASK;
    unsigned iPage = 0;
    while (iPage < pCur->cPages)
    {
#if PGM_GST_TYPE == PGM_TYPE_32BIT
        X86PDE    Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
#elif PGM_GST_TYPE == PGM_TYPE_PAE
        X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr);
#elif PGM_GST_TYPE == PGM_TYPE_AMD64
        X86PDEPAE Pde = pgmGstGetLongModePDE(&pVCpu->pgm.s, GCPtr);
#endif
        if (Pde.n.u1Present)
        {
            if (   !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
                || !(pState->cr4 & X86_CR4_PSE)
# endif
               )
            {
                /*
                 * Normal page table.
                 */
                PGSTPT pPT;
                int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
                if (RT_SUCCESS(rc))
                {
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                    {
                        GSTPTE Pte = pPT->a[iPTE];
                        RTGCPHYS GCPhysNew;
                        if (Pte.n.u1Present)
                            GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
                        else
                            GCPhysNew = NIL_RTGCPHYS;
                        if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                        {
                            if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                                pgmHandlerVirtualClearPage(&pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
                else
                {
                    /* not-present. */
                    offPage = 0;
                    AssertRC(rc);
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                        {
                            pgmHandlerVirtualClearPage(&pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
            }
            else
            {
                /*
                 * 2/4MB page.
                 */
                RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde);
                for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                     i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
                     i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                {
                    RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
                    if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                            pgmHandlerVirtualClearPage(&pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                        AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                         ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
                                          pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                          pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                        pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                        pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                    }
                }
            } /* pde type */
        }
        else
        {
            /* not-present. */
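            /* (GST_PT_MASK + 1) is the number of entries in one page table, so
               the initializer below yields the number of pages left in the page
               table that this not-present PDE covers. */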
            for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
                 cPages && iPage < pCur->cPages;
                 cPages--, iPage++, GCPtr += PAGE_SIZE)
            {
                if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                {
                    pgmHandlerVirtualClearPage(&pVM->pgm.s, pCur, iPage);
                    pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                    pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                }
            }
            offPage = 0;
        }
    } /* for pages in virtual mapping. */

    return 0;
}
#endif /* 32BIT, PAE and AMD64 */


/**
 * Updates the virtual page access handlers.
 *
 * @returns true if bits were flushed.
 * @returns false if bits weren't flushed.
 * @param   pVM     VM handle.
 * @param   cr4     The cr4 register value.
 */
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    /** @todo
     * In theory this is not sufficient: the guest can change a single page in a range with invlpg
     */

    /*
     * Resolve any virtual address based access handlers to GC physical addresses.
     * This should be fairly quick.
     */
    RTUINT fTodo = 0;

    pgmLock(pVM);
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);

    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PGMHVUSTATE State;
        PVMCPU      pVCpu = &pVM->aCpus[i];

        State.pVM   = pVM;
        State.pVCpu = pVCpu;
        State.fTodo = pVCpu->pgm.s.fSyncFlags;
        State.cr4   = cr4;
        RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);

        fTodo |= State.fTodo;
    }
    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);


    /*
     * Set / reset bits?
     */
    if (fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
    {
        STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
        Log(("HandlerVirtualUpdate: resets bits\n"));
        RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);

        for (unsigned i = 0; i < pVM->cCPUs; i++)
        {
            PVMCPU pVCpu = &pVM->aCpus[i];
            pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
        }

        STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
    }
    pgmUnlock(pVM);

    return !!(fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);

#else /* real / protected */
    return false;
#endif
}
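

/*
 * Illustrative only (an assumed caller, not part of the original file; judging
 * by the STAM counters above, the real call site is in the SyncCR3 path):
 *
 *     if (PGM_GST_NAME(HandlerVirtualUpdate)(pVM, CPUMGetGuestCR4(pVCpu)))
 *         Log(("Virtual handler to-physical mappings were flushed\n"));
 */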