VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMGst.h@ 11964

Last change on this file since 11964 was 11525, checked in by vboxsync, 16 years ago

Added support for PSE-36.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 20.0 KB
Line 
1/* $Id: PGMGst.h 11525 2008-08-21 09:07:51Z vboxsync $ */
2/** @file
3 * VBox - Page Manager / Monitor, Guest Paging Template.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/*
 * This template is included once per guest paging mode.  Undefine any macros
 * left over from a previous instantiation before redefining them for the
 * current PGM_GST_TYPE.
 */
#undef GSTPT
#undef PGSTPT
#undef GSTPTE
#undef PGSTPTE
#undef GSTPD
#undef PGSTPD
#undef GSTPDE
#undef PGSTPDE
#undef GST_BIG_PAGE_SIZE
#undef GST_BIG_PAGE_OFFSET_MASK
#undef GST_PDE_PG_MASK
#undef GST_PDE_BIG_PG_MASK
#undef GST_PD_SHIFT
#undef GST_PD_MASK
#undef GST_PTE_PG_MASK
#undef GST_PT_SHIFT
#undef GST_PT_MASK
#undef GST_TOTAL_PD_ENTRIES
#undef GST_CR3_PAGE_MASK
#undef GST_PDPE_ENTRIES
#undef GST_GET_PDE_BIG_PG_GCPHYS

#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
/* Legacy 32-bit paging (also used for real and protected mode without paging):
   4-byte entries, 4MB big pages (PSE). */
# define GSTPT                      X86PT
# define PGSTPT                     PX86PT
# define GSTPTE                     X86PTE
# define PGSTPTE                    PX86PTE
# define GSTPD                      X86PD
# define PGSTPD                     PX86PD
# define GSTPDE                     X86PDE
# define PGSTPDE                    PX86PDE
# define GST_BIG_PAGE_SIZE          X86_PAGE_4M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_4M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PG_MASK
# define GST_PDE_BIG_PG_MASK        X86_PDE4M_PG_MASK
/* Goes via a helper so the PSE-36 address bits stored in the PDE are handled. */
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  pgmGstGet4MBPhysPage(&pVM->pgm.s, PdeGst)
# define GST_PD_SHIFT               X86_PD_SHIFT
# define GST_PD_MASK                X86_PD_MASK
# define GST_TOTAL_PD_ENTRIES       X86_PG_ENTRIES
# define GST_PTE_PG_MASK            X86_PTE_PG_MASK
# define GST_PT_SHIFT               X86_PT_SHIFT
# define GST_PT_MASK                X86_PT_MASK
# define GST_CR3_PAGE_MASK          X86_CR3_PAGE_MASK
#elif PGM_GST_TYPE == PGM_TYPE_PAE \
   || PGM_GST_TYPE == PGM_TYPE_AMD64
/* PAE and AMD64 (long mode) paging: 8-byte entries, 2MB big pages. */
# define GSTPT                      X86PTPAE
# define PGSTPT                     PX86PTPAE
# define GSTPTE                     X86PTEPAE
# define PGSTPTE                    PX86PTEPAE
# define GSTPD                      X86PDPAE
# define PGSTPD                     PX86PDPAE
# define GSTPDE                     X86PDEPAE
# define PGSTPDE                    PX86PDEPAE
# define GST_BIG_PAGE_SIZE          X86_PAGE_2M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_2M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PAE_PG_MASK
# define GST_PDE_BIG_PG_MASK        X86_PDE2M_PAE_PG_MASK
/* PAE/AMD64 PDEs hold the full physical address; just mask it out. */
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  (PdeGst.u & GST_PDE_BIG_PG_MASK)
# define GST_PD_SHIFT               X86_PD_PAE_SHIFT
# define GST_PD_MASK                X86_PD_PAE_MASK
# if PGM_GST_TYPE == PGM_TYPE_PAE
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_PAE_PDPE_ENTRIES
# else
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_AMD64_PDPE_ENTRIES
# endif
# define GST_PTE_PG_MASK            X86_PTE_PAE_PG_MASK
# define GST_PT_SHIFT               X86_PT_PAE_SHIFT
# define GST_PT_MASK                X86_PT_PAE_MASK
# define GST_CR3_PAGE_MASK          X86_CR3_PAE_PAGE_MASK
#endif
99
100
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
/* r3 - ring-3 only entry points. */
PGM_GST_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0);
PGM_GST_DECL(int, Enter)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, Relocate)(PVM pVM, RTGCUINTPTR offDelta);
PGM_GST_DECL(int, Exit)(PVM pVM);

/* Physical access handlers used for guest CR3 write monitoring (ring-3). */
static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
#if 0
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerPD(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
#endif

/* all - entry points available in all contexts (R3, R0 and GC). */
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPDE);
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
__END_DECLS
126
127
128/**
129 * Initializes the guest bit of the paging mode data.
130 *
131 * @returns VBox status code.
132 * @param pVM The VM handle.
133 * @param fResolveGCAndR0 Indicate whether or not GC and Ring-0 symbols can be resolved now.
134 * This is used early in the init process to avoid trouble with PDM
135 * not being initialized yet.
136 */
137PGM_GST_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0)
138{
139 Assert(pModeData->uGstType == PGM_GST_TYPE);
140
141 /* Ring-3 */
142 pModeData->pfnR3GstRelocate = PGM_GST_NAME(Relocate);
143 pModeData->pfnR3GstExit = PGM_GST_NAME(Exit);
144 pModeData->pfnR3GstGetPDE = PGM_GST_NAME(GetPDE);
145 pModeData->pfnR3GstGetPage = PGM_GST_NAME(GetPage);
146 pModeData->pfnR3GstModifyPage = PGM_GST_NAME(ModifyPage);
147 pModeData->pfnR3GstMapCR3 = PGM_GST_NAME(MapCR3);
148 pModeData->pfnR3GstUnmapCR3 = PGM_GST_NAME(UnmapCR3);
149 pModeData->pfnR3GstMonitorCR3 = PGM_GST_NAME(MonitorCR3);
150 pModeData->pfnR3GstUnmonitorCR3 = PGM_GST_NAME(UnmonitorCR3);
151
152#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
153 pModeData->pfnR3GstWriteHandlerCR3 = PGM_GST_NAME(WriteHandlerCR3);
154 pModeData->pszR3GstWriteHandlerCR3 = "Guest CR3 Write access handler";
155 pModeData->pfnR3GstPAEWriteHandlerCR3 = PGM_GST_NAME(WriteHandlerCR3);
156 pModeData->pszR3GstPAEWriteHandlerCR3 = "Guest CR3 Write access handler (PAE)";
157#else
158 pModeData->pfnR3GstWriteHandlerCR3 = NULL;
159 pModeData->pszR3GstWriteHandlerCR3 = NULL;
160 pModeData->pfnR3GstPAEWriteHandlerCR3 = NULL;
161 pModeData->pszR3GstPAEWriteHandlerCR3 = NULL;
162#endif
163
164 if (fResolveGCAndR0)
165 {
166 int rc;
167
168#if PGM_SHW_TYPE != PGM_TYPE_AMD64 /* No AMD64 for traditional virtualization, only VT-x and AMD-V. */
169 /* GC */
170 rc = PDMR3GetSymbolGC(pVM, NULL, PGM_GST_NAME_GC_STR(GetPage), &pModeData->pfnGCGstGetPage);
171 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_GC_STR(GetPage), rc), rc);
172 rc = PDMR3GetSymbolGC(pVM, NULL, PGM_GST_NAME_GC_STR(ModifyPage), &pModeData->pfnGCGstModifyPage);
173 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_GC_STR(ModifyPage), rc), rc);
174 rc = PDMR3GetSymbolGC(pVM, NULL, PGM_GST_NAME_GC_STR(GetPDE), &pModeData->pfnGCGstGetPDE);
175 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_GC_STR(GetPDE), rc), rc);
176 rc = PDMR3GetSymbolGC(pVM, NULL, PGM_GST_NAME_GC_STR(MonitorCR3), &pModeData->pfnGCGstMonitorCR3);
177 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_GC_STR(MonitorCR3), rc), rc);
178 rc = PDMR3GetSymbolGC(pVM, NULL, PGM_GST_NAME_GC_STR(UnmonitorCR3), &pModeData->pfnGCGstUnmonitorCR3);
179 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_GC_STR(UnmonitorCR3), rc), rc);
180 rc = PDMR3GetSymbolGC(pVM, NULL, PGM_GST_NAME_GC_STR(MapCR3), &pModeData->pfnGCGstMapCR3);
181 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_GC_STR(MapCR3), rc), rc);
182 rc = PDMR3GetSymbolGC(pVM, NULL, PGM_GST_NAME_GC_STR(UnmapCR3), &pModeData->pfnGCGstUnmapCR3);
183 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_GC_STR(UnmapCR3), rc), rc);
184# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
185 rc = PDMR3GetSymbolGC(pVM, NULL, PGM_GST_NAME_GC_STR(WriteHandlerCR3), &pModeData->pfnGCGstWriteHandlerCR3);
186 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_GC_STR(WriteHandlerCR3), rc), rc);
187 rc = PDMR3GetSymbolGC(pVM, NULL, PGM_GST_NAME_GC_STR(WriteHandlerCR3), &pModeData->pfnGCGstPAEWriteHandlerCR3);
188 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_GC_STR(PAEWriteHandlerCR3), rc), rc);
189# endif
190#endif /* Not AMD64 shadow paging. */
191
192 /* Ring-0 */
193 rc = PDMR3GetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(GetPage), &pModeData->pfnR0GstGetPage);
194 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(GetPage), rc), rc);
195 rc = PDMR3GetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(ModifyPage), &pModeData->pfnR0GstModifyPage);
196 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(ModifyPage), rc), rc);
197 rc = PDMR3GetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(GetPDE), &pModeData->pfnR0GstGetPDE);
198 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(GetPDE), rc), rc);
199 rc = PDMR3GetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(MonitorCR3), &pModeData->pfnR0GstMonitorCR3);
200 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(MonitorCR3), rc), rc);
201 rc = PDMR3GetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(UnmonitorCR3), &pModeData->pfnR0GstUnmonitorCR3);
202 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(UnmonitorCR3), rc), rc);
203 rc = PDMR3GetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(MapCR3), &pModeData->pfnR0GstMapCR3);
204 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(MapCR3), rc), rc);
205 rc = PDMR3GetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(UnmapCR3), &pModeData->pfnR0GstUnmapCR3);
206 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(UnmapCR3), rc), rc);
207#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
208 rc = PDMR3GetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(WriteHandlerCR3), &pModeData->pfnR0GstWriteHandlerCR3);
209 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(WriteHandlerCR3), rc), rc);
210 rc = PDMR3GetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(WriteHandlerCR3), &pModeData->pfnR0GstPAEWriteHandlerCR3);
211 AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(PAEWriteHandlerCR3), rc), rc);
212#endif
213 }
214
215 return VINF_SUCCESS;
216}
217
218
219/**
220 * Enters the guest mode.
221 *
222 * @returns VBox status code.
223 * @param pVM VM handle.
224 * @param GCPhysCR3 The physical address from the CR3 register.
225 */
226PGM_GST_DECL(int, Enter)(PVM pVM, RTGCPHYS GCPhysCR3)
227{
228 /*
229 * Map and monitor CR3
230 */
231 int rc = PGM_GST_NAME(MapCR3)(pVM, GCPhysCR3);
232 if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
233 rc = PGM_GST_NAME(MonitorCR3)(pVM, GCPhysCR3);
234 return rc;
235}
236
237
/**
 * Relocate any GC pointers related to guest mode paging.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   offDelta    The relocation offset.
 */
PGM_GST_DECL(int, Relocate)(PVM pVM, RTGCUINTPTR offDelta)
{
    /* nothing special to do here - InitData does the job. */
    return VINF_SUCCESS;
}
250
251
252/**
253 * Exits the guest mode.
254 *
255 * @returns VBox status code.
256 * @param pVM VM handle.
257 */
258PGM_GST_DECL(int, Exit)(PVM pVM)
259{
260 int rc = PGM_GST_NAME(UnmonitorCR3)(pVM);
261 if (VBOX_SUCCESS(rc))
262 rc = PGM_GST_NAME(UnmapCR3)(pVM);
263 return rc;
264}
265
266
267#if PGM_GST_TYPE == PGM_TYPE_32BIT
268/**
269 * Physical write access for the Guest CR3 in 32-bit mode.
270 *
271 * @returns VINF_SUCCESS if the handler have carried out the operation.
272 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
273 * @param pVM VM Handle.
274 * @param GCPhys The physical address the guest is writing to.
275 * @param pvPhys The HC mapping of that address.
276 * @param pvBuf What the guest is reading/writing.
277 * @param cbBuf How much it's reading/writing.
278 * @param enmAccessType The access type.
279 * @param pvUser User argument.
280 */
281static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
282{
283 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
284 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
285 Log2(("pgmR3Gst32BitWriteHandlerCR3: ff=%#x GCPhys=%VGp pvPhys=%p cbBuf=%d pvBuf={%.*Vhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));
286
287 /*
288 * Do the write operation.
289 */
290 memcpy(pvPhys, pvBuf, cbBuf);
291 if ( !pVM->pgm.s.fMappingsFixed
292 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
293 {
294 /*
295 * Check for conflicts.
296 */
297 const RTGCUINTPTR offPD = GCPhys & PAGE_OFFSET_MASK;
298 const unsigned iPD1 = offPD / sizeof(X86PDE);
299 const unsigned iPD2 = (offPD + cbBuf - 1) / sizeof(X86PDE);
300 Assert(iPD1 - iPD2 <= 1);
301 if ( ( pVM->pgm.s.pGuestPDHC->a[iPD1].n.u1Present
302 && pgmGetMapping(pVM, iPD1 << X86_PD_SHIFT) )
303 || ( iPD1 != iPD2
304 && pVM->pgm.s.pGuestPDHC->a[iPD2].n.u1Present
305 && pgmGetMapping(pVM, iPD2 << X86_PD_SHIFT) )
306 )
307 {
308 Log(("pgmR3Gst32BitWriteHandlerCR3: detected conflict. iPD1=%#x iPD2=%#x GCPhys=%VGp\n", iPD1, iPD2, GCPhys));
309 STAM_COUNTER_INC(&pVM->pgm.s.StatHCGuestPDWriteConflict);
310 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
311 }
312 }
313
314 STAM_COUNTER_INC(&pVM->pgm.s.StatHCGuestPDWrite);
315 return VINF_SUCCESS;
316}
317#endif /* 32BIT */
318
319
320#if PGM_GST_TYPE == PGM_TYPE_PAE
/**
 * Physical write access handler for the Guest CR3 in PAE mode.
 *
 * @returns VINF_SUCCESS if the handler have carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    Log2(("pgmR3GstPAEWriteHandlerCR3: ff=%#x GCPhys=%VGp pvPhys=%p cbBuf=%d pvBuf={%.*Vhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));

    /*
     * Do the write operation.
     */
    memcpy(pvPhys, pvBuf, cbBuf);
    if (    !pVM->pgm.s.fMappingsFixed
        &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        /*
         * Check if any of the PDs have changed.
         * We'll simply check all of them instead of figuring out which one/two to check.
         * (PAE has only 4 PDPT entries, so the full scan is cheap.)
         */
        for (unsigned i = 0; i < 4; i++)
        {
            if (    pVM->pgm.s.pGstPaePDPTHC->a[i].n.u1Present
                &&  (pVM->pgm.s.pGstPaePDPTHC->a[i].u & X86_PDPE_PG_MASK) != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
            {
                Log(("pgmR3GstPAEWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%VGp\n",
                     i, pVM->pgm.s.pGstPaePDPTHC->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
                /*
                 * The PD has changed.
                 * We will schedule a monitoring update for the next TLB Flush,
                 * InvalidatePage or SyncCR3.
                 *
                 * This isn't perfect, because a lazy page sync might be dealing with an half
                 * updated PDPE. However, we assume that the guest OS is disabling interrupts
                 * and being extremely careful (cmpxchg8b) when updating a PDPE where it's
                 * executing.
                 */
                pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
            }
        }
    }
    /*
     * Flag a updating of the monitor at the next crossroad so we don't monitor the
     * wrong pages for soo long that they can be reused as code pages and freak out
     * the recompiler or something.
     */
    else
        pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;


    STAM_COUNTER_INC(&pVM->pgm.s.StatHCGuestPDWrite);
    return VINF_SUCCESS;
}
384
# if 0
/**
 * Physical write access handler for a guest PAE page directory.
 *
 * Currently disabled (#if 0); kept for reference.  Lets the write through and
 * checks the touched PDE(s) of the affected PD for conflicts with the
 * hypervisor mappings.
 *
 * @returns VINF_SUCCESS if the handler have carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerPD(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    Log2(("pgmR3GstPAEWriteHandlerPD: ff=%#x GCPhys=%VGp pvPhys=%p cbBuf=%d pvBuf={%.*Vhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));

    /*
     * Do the write operation.
     */
    memcpy(pvPhys, pvBuf, cbBuf);
    if (    !pVM->pgm.s.fMappingsFixed
        &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        /*
         * Figure out which of the 4 PDs this is.
         */
        unsigned i;
        for (i = 0; i < 4; i++)
            if (pVM->pgm.s.pGstPaePDPTHC->a[i].u == (GCPhys & X86_PTE_PAE_PG_MASK))
            {
                PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
                const RTGCUINTPTR offPD = GCPhys & PAGE_OFFSET_MASK;
                const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
                const unsigned iPD2 = (offPD + cbBuf - 1) / sizeof(X86PDEPAE);
                /* iPD2 >= iPD1; the write spans at most two consecutive PDEs.
                   (Fixed from 'iPD1 - iPD2', which underflowed the unsigned
                   subtraction whenever two entries were touched.) */
                Assert(iPD2 - iPD1 <= 1);
                if (    (   pPDSrc->a[iPD1].n.u1Present
                         && pgmGetMapping(pVM, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)) )
                    ||  (   iPD1 != iPD2
                         && pPDSrc->a[iPD2].n.u1Present
                         && pgmGetMapping(pVM, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)) )
                   )
                {
                    Log(("pgmR3GstPaePD3WriteHandler: detected conflict. i=%d iPD1=%#x iPD2=%#x GCPhys=%VGp\n",
                         i, iPD1, iPD2, GCPhys));
                    STAM_COUNTER_INC(&pVM->pgm.s.StatHCGuestPDWriteConflict);
                    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                }
                break; /* ASSUMES no duplicate entries... */
            }
        Assert(i < 4);
    }

    STAM_COUNTER_INC(&pVM->pgm.s.StatHCGuestPDWrite);
    return VINF_SUCCESS;
}
# endif
445#endif /* PAE */
446
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette