VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@10324

Last change on this file since 10324 was 10288, checked in by vboxsync, 17 years ago

Removed more unneeded defines
/* $Id: PGMAllGst.h 10288 2008-07-05 17:42:11Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#undef GSTPT
#undef PGSTPT
#undef GSTPTE
#undef PGSTPTE
#undef GSTPD
#undef PGSTPD
#undef GSTPDE
#undef PGSTPDE
#undef GST_BIG_PAGE_SIZE
#undef GST_BIG_PAGE_OFFSET_MASK
#undef GST_PDE_PG_MASK
#undef GST_PDE_BIG_PG_MASK
#undef GST_PD_SHIFT
#undef GST_PD_MASK
#undef GST_PTE_PG_MASK
#undef GST_PT_SHIFT
#undef GST_PT_MASK
#undef GST_TOTAL_PD_ENTRIES
#undef GST_CR3_PAGE_MASK
#undef GST_PDPE_ENTRIES
#undef GST_PDPT_SHIFT
#undef GST_PDPT_MASK
#undef GST_PDPE_PG_MASK

#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
# define GSTPT                      SHWPT
# define PGSTPT                     PSHWPT
# define GSTPTE                     SHWPTE
# define PGSTPTE                    PSHWPTE
# define GSTPD                      SHWPD
# define PGSTPD                     PSHWPD
# define GSTPDE                     SHWPDE
# define PGSTPDE                    PSHWPDE
# define GST_PTE_PG_MASK            SHW_PTE_PG_MASK
#elif PGM_GST_TYPE == PGM_TYPE_32BIT
# define GSTPT                      X86PT
# define PGSTPT                     PX86PT
# define GSTPTE                     X86PTE
# define PGSTPTE                    PX86PTE
# define GSTPD                      X86PD
# define PGSTPD                     PX86PD
# define GSTPDE                     X86PDE
# define PGSTPDE                    PX86PDE
# define GST_BIG_PAGE_SIZE          X86_PAGE_4M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_4M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PG_MASK
# define GST_PDE_BIG_PG_MASK        X86_PDE4M_PG_MASK
# define GST_PD_SHIFT               X86_PD_SHIFT
# define GST_PD_MASK                X86_PD_MASK
# define GST_TOTAL_PD_ENTRIES       X86_PG_ENTRIES
# define GST_PTE_PG_MASK            X86_PTE_PG_MASK
# define GST_PT_SHIFT               X86_PT_SHIFT
# define GST_PT_MASK                X86_PT_MASK
# define GST_CR3_PAGE_MASK          X86_CR3_PAGE_MASK
#elif PGM_GST_TYPE == PGM_TYPE_PAE \
   || PGM_GST_TYPE == PGM_TYPE_AMD64
# define GSTPT                      X86PTPAE
# define PGSTPT                     PX86PTPAE
# define GSTPTE                     X86PTEPAE
# define PGSTPTE                    PX86PTEPAE
# define GSTPD                      X86PDPAE
# define PGSTPD                     PX86PDPAE
# define GSTPDE                     X86PDEPAE
# define PGSTPDE                    PX86PDEPAE
# define GST_BIG_PAGE_SIZE          X86_PAGE_2M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_2M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PAE_PG_MASK_FULL
# define GST_PDE_BIG_PG_MASK        X86_PDE2M_PAE_PG_MASK
# define GST_PD_SHIFT               X86_PD_PAE_SHIFT
# define GST_PD_MASK                X86_PD_PAE_MASK
# if PGM_GST_TYPE == PGM_TYPE_PAE
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_PAE_PDPE_ENTRIES
#  define GST_PDPE_PG_MASK          X86_PDPE_PG_MASK_FULL
#  define GST_PDPT_SHIFT            X86_PDPT_SHIFT
#  define GST_PDPT_MASK             X86_PDPT_MASK_PAE
#  define GST_PTE_PG_MASK           X86_PTE_PAE_PG_MASK
#  define GST_CR3_PAGE_MASK         X86_CR3_PAE_PAGE_MASK
# else
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_AMD64_PDPE_ENTRIES
#  define GST_PDPT_SHIFT            X86_PDPT_SHIFT
#  define GST_PDPE_PG_MASK          X86_PDPE_PG_MASK_FULL
#  define GST_PDPT_MASK             X86_PDPT_MASK_AMD64
#  define GST_PTE_PG_MASK           X86_PTE_PAE_PG_MASK_FULL
#  define GST_CR3_PAGE_MASK         X86_CR3_AMD64_PAGE_MASK
# endif
# define GST_PT_SHIFT               X86_PT_PAE_SHIFT
# define GST_PT_MASK                X86_PT_PAE_MASK
#endif
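
/* A worked example of the mappings above (illustrative note, not part of the
 * original source): for a 32-bit guest, GST_PD_SHIFT is 22 and GST_PT_SHIFT
 * is 12, so a linear address splits 10/10/12 into PD index, PT index and page
 * offset, and GST_BIG_PAGE_SIZE is 4MB. For PAE/AMD64 guests the entries are
 * 64-bit, GST_PD_SHIFT is 21 and big pages are 2MB.
 */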


/*******************************************************************************
*   Internal Functions                                                          *
*******************************************************************************/
__BEGIN_DECLS
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPDE);
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
#ifndef IN_RING3
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
# if PGM_GST_TYPE == PGM_TYPE_PAE \
  || PGM_GST_TYPE == PGM_TYPE_AMD64
PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
# endif
#endif
__END_DECLS



/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If distinguishing between big and normal pages becomes necessary
 * at a later point, a PGMGstGetPageEx() can be created for that purpose.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page. Page aligned!
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
    /*
     * Fake it.
     */
    if (pfFlags)
        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    if (pGCPhys)
        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64

    /*
     * Get the PDE.
     */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    const X86PDE Pde = CTXSUFF(pVM->pgm.s.pGuestPD)->a[GCPtr >> X86_PD_SHIFT];
# elif PGM_GST_TYPE == PGM_TYPE_PAE
    X86PDEPAE Pde;
    bool      fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);

    /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present.
     * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx).
     */
    Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    PX86PML4E pPml4e;
    X86PDPE   Pdpe;
    X86PDEPAE Pde;
    bool      fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);

    Pde.u = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr, &pPml4e, &Pdpe);
    Assert(pPml4e);
    if (!(pPml4e->n.u1Present & Pdpe.n.u1Present))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Merge accessed, write, user and no-execute bits into the PDE. */
    Pde.n.u1Accessed  &= pPml4e->n.u1Accessed  & Pdpe.lm.u1Accessed;
    Pde.n.u1Write     &= pPml4e->n.u1Write     & Pdpe.lm.u1Write;
    Pde.n.u1User      &= pPml4e->n.u1User      & Pdpe.lm.u1User;
    Pde.n.u1NoExecute &= pPml4e->n.u1NoExecute & Pdpe.lm.u1NoExecute;
# endif

    /*
     * Lookup the page.
     */
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
        ||  !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
       )
    {
        PGSTPT pPT;
        int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
        if (VBOX_FAILURE(rc))
            return rc;

        /*
         * Get PT entry and check presence.
         */
        const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        if (!Pte.n.u1Present)
            return VERR_PAGE_NOT_PRESENT;

        /*
         * Store the result.
         * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
         * where the PDPE is simplified.
         */
        if (pfFlags)
        {
            *pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
                     & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
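            /* Worked example (illustrative note): the second operand above is
             * all-ones except for the RW and US bits, which are taken from the
             * PDE. So with PDE.RW=0 and PTE.RW=1 the effective RW bit is 0 -
             * the more restrictive level wins, matching the CPU's behaviour.
             */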
# if PGM_WITH_NX(PGM_GST_TYPE)
            /* The NX bit is determined by a bitwise OR between the PT and PD */
            if (fNoExecuteBitValid)
                *pfFlags |= (Pte.u & Pde.u & X86_PTE_PAE_NX);
# endif
        }
        if (pGCPhys)
            *pGCPhys = Pte.u & GST_PTE_PG_MASK;
    }
    else
    {
        /*
         * Map big to 4k PTE and store the result
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
                     | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
# if PGM_WITH_NX(PGM_GST_TYPE)
            /* The NX bit is determined by a bitwise OR between the PT and PD */
            if (fNoExecuteBitValid)
                *pfFlags |= (Pde.u & X86_PTE_PAE_NX);
# endif
        }
        if (pGCPhys)
            *pGCPhys = (Pde.u & GST_PDE_BIG_PG_MASK) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK)); /** @todo pse36 */
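        /* Mask arithmetic, worked for a 32-bit guest (illustrative note):
         * GST_PDE_BIG_PG_MASK = 0xffc00000 and GST_PTE_PG_MASK = 0xfffff000,
         * so ~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK = 0x003ff000. The
         * expression thus keeps the 4MB frame from the PDE and adds bits
         * 12..21 of GCPtr, selecting the 4KB page within the big page.
         */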
    }
    return VINF_SUCCESS;
#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}
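
/* A minimal usage sketch (hypothetical caller, assuming the public
 * PGMGstGetPage wrapper that this template instantiates):
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVM, GCPtr, &fFlags, &GCPhys);
 *     if (VBOX_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         ; // GCPtr is mapped writable at guest physical address GCPhys.
 */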


/**
 * Modifies page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to. Page aligned!
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    for (;;)
    {
        /*
         * Get the PD entry.
         */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        PX86PDE pPde = &CTXSUFF(pVM->pgm.s.pGuestPD)->a[GCPtr >> X86_PD_SHIFT];
# elif PGM_GST_TYPE == PGM_TYPE_PAE
        /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present.
         * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx).
         */
        PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVM->pgm.s, GCPtr);
        Assert(pPde);
        if (!pPde)
            return VERR_PAGE_TABLE_NOT_PRESENT;
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
        /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
        PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVM->pgm.s, GCPtr);
        Assert(pPde);
        if (!pPde)
            return VERR_PAGE_TABLE_NOT_PRESENT;
# endif
        GSTPDE Pde = *pPde;
        Assert(Pde.n.u1Present);
        if (!Pde.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
            ||  !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
           )
        {
            /*
             * 4KB Page table
             *
             * Walk page tables and pages till we're done.
             */
            PGSTPT pPT;
            int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
            if (VBOX_FAILURE(rc))
                return rc;

            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(pPT->a))
            {
                GSTPTE Pte = pPT->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                pPT->a[iPTE] = Pte;

                /* next page */
                cb -= PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 4MB Page table
             */
            Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS)) /** @todo pse36 */
                  | (fFlags & ~GST_PTE_PG_MASK)
                  | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *pPde = Pde;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    return VINF_SUCCESS;
#endif
}
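
/* A minimal usage sketch (hypothetical caller, assuming the public
 * PGMGstModifyPage wrapper): mark one page read-only by clearing X86_PTE_RW;
 * the AND mask clears the bit, the OR mask adds nothing:
 *
 *     rc = PGMGstModifyPage(pVM, GCPtr, PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
 */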


/**
 * Retrieves guest PDE information.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Guest context pointer.
 * @param   pPDE        Pointer to guest PDE structure.
 */
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPDE)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

# if PGM_GST_TYPE == PGM_TYPE_32BIT
    X86PDE    Pde;
    Pde = CTXSUFF(pVM->pgm.s.pGuestPD)->a[GCPtr >> GST_PD_SHIFT];
# elif PGM_GST_TYPE == PGM_TYPE_PAE
    X86PDEPAE Pde;
    Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    X86PDEPAE Pde;
    Pde.u = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr);
# endif

    pPDE->u = (X86PGPAEUINT)Pde.u;
    return VINF_SUCCESS;
#else
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
#endif
}
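
/* Illustrative (hypothetical) caller, assuming a PGMGstGetPDE wrapper:
 * check whether GCPtr is covered by a present big (2/4MB) guest page:
 *
 *     X86PDEPAE Pde;
 *     if (   VBOX_SUCCESS(PGMGstGetPDE(pVM, GCPtr, &Pde))
 *         && Pde.n.u1Present
 *         && Pde.b.u1Size)
 *         ; // big page mapping
 */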



/**
 * Maps the CR3 into HMA in GC and locates it in HC.
 *
 * Note that a MapCR3 call is usually not followed by an UnmapCR3 call; whenever
 * CR3 is updated we simply call MapCR3 again.
 *
 * @returns VBox status, no specials.
 * @param   pVM         VM handle.
 * @param   GCPhysCR3   The physical address in the CR3 register.
 */
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    LogFlow(("MapCR3: %VGp\n", GCPhysCR3));

    /*
     * Map the page CR3 points at.
     */
    RTHCPHYS HCPhysGuestCR3;
    RTHCPTR  HCPtrGuestCR3;
    int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3 & GST_CR3_PAGE_MASK, &HCPtrGuestCR3, &HCPhysGuestCR3);
    if (VBOX_SUCCESS(rc))
    {
        rc = PGMMap(pVM, (RTGCUINTPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3, PAGE_SIZE, 0);
        if (VBOX_SUCCESS(rc))
        {
            PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            pVM->pgm.s.pGuestPDHC = (R3R0PTRTYPE(PX86PD))HCPtrGuestCR3;
            pVM->pgm.s.pGuestPDGC = (RCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;

# elif PGM_GST_TYPE == PGM_TYPE_PAE
            unsigned offset = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
            pVM->pgm.s.pGstPaePDPTHC = (R3R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
            pVM->pgm.s.pGstPaePDPTGC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + offset);
            Log(("Cached mapping %VGv\n", pVM->pgm.s.pGstPaePDPTGC));

            /*
             * Map the 4 PDs too.
             */
            RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
            for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
            {
                if (pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[i].n.u1Present)
                {
                    RTHCPTR  HCPtr;
                    RTHCPHYS HCPhys;
                    RTGCPHYS GCPhys = pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[i].u & X86_PDPE_PG_MASK;
                    int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
                    if (VBOX_SUCCESS(rc2))
                    {
                        rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
                        AssertRCReturn(rc, rc);
                        pVM->pgm.s.apGstPaePDsHC[i]    = (R3R0PTRTYPE(PX86PDPAE))HCPtr;
                        pVM->pgm.s.apGstPaePDsGC[i]    = (RCPTRTYPE(PX86PDPAE))GCPtr;
                        pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
                        PGM_INVL_PG(GCPtr);
                        continue;
                    }
                    AssertMsgFailed(("pgmR3Gst32BitMapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
                }

                pVM->pgm.s.apGstPaePDsHC[i]    = 0;
                pVM->pgm.s.apGstPaePDsGC[i]    = 0;
                pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
                PGM_INVL_PG(GCPtr);
            }
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
            PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);

            pVM->pgm.s.pGstPaePML4HC = (R3R0PTRTYPE(PX86PML4))HCPtrGuestCR3;

            if (!HWACCMIsNestedPagingActive(pVM))
            {
                if (pVM->pgm.s.pHCShwAmd64CR3)
                {
                    pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
                    pVM->pgm.s.pHCShwAmd64CR3 = 0;
                }

                Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
                rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.pHCShwAmd64CR3);
                if (rc == VERR_PGM_POOL_FLUSHED)
                {
                    AssertFailed(); /* check if we handle this properly!! */
                    return VINF_PGM_SYNC_CR3;
                }
                pVM->pgm.s.pHCPaePML4 = (PX86PML4)PGMPOOL_PAGE_2_PTR(pPool->CTXSUFF(pVM), pVM->pgm.s.pHCShwAmd64CR3);
                pVM->pgm.s.HCPhysPaePML4 = pVM->pgm.s.pHCShwAmd64CR3->Core.Key;
            }
# endif
        }
        else
            AssertMsgFailed(("rc=%Vrc GCPhysGuestPD=%VGp\n", rc, GCPhysCR3));
    }
    else
        AssertMsgFailed(("rc=%Vrc GCPhysGuestPD=%VGp\n", rc, GCPhysCR3));

#else /* prot/real stub */
    int rc = VINF_SUCCESS;
#endif
    return rc;
}
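
/* Resulting hypervisor-memory-area layout for a PAE guest, as established
 * above (illustrative summary):
 *
 *     GCPtrCR3Mapping + 0 pages    : the guest PDPT page (CR3 target)
 *     GCPtrCR3Mapping + 1..4 pages : the four guest PDs, where present
 *
 * Entries whose PDPTE is not present get NIL_RTGCPHYS and null pointers.
 */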


/**
 * Unmaps the CR3.
 *
 * @returns VBox status, no specials.
 * @param   pVM         VM handle.
 */
PGM_GST_DECL(int, UnmapCR3)(PVM pVM)
{
    LogFlow(("UnmapCR3\n"));

    int rc = VINF_SUCCESS;

#if PGM_GST_TYPE == PGM_TYPE_32BIT
    pVM->pgm.s.pGuestPDHC = 0;
    pVM->pgm.s.pGuestPDGC = 0;

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    pVM->pgm.s.pGstPaePDPTHC = 0;
    pVM->pgm.s.pGstPaePDPTGC = 0;
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        pVM->pgm.s.apGstPaePDsHC[i]    = 0;
        pVM->pgm.s.apGstPaePDsGC[i]    = 0;
        pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
    }

#elif PGM_GST_TYPE == PGM_TYPE_AMD64
    pVM->pgm.s.pGstPaePML4HC = 0;
    if (!HWACCMIsNestedPagingActive(pVM))
    {
        pVM->pgm.s.pHCPaePML4 = 0;
        if (pVM->pgm.s.pHCShwAmd64CR3)
        {
            PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
            pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
            pVM->pgm.s.pHCShwAmd64CR3 = NULL;
        }
    }

#else /* prot/real mode stub */
    /* nothing to do */
#endif
    return rc;
}


#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM_POOL

/**
 * Registers physical page monitors for the necessary paging
 * structures to detect conflicts with our guest mappings.
 *
 * This is always called after mapping CR3.
 * This is never called with fixed mappings.
 *
 * @returns VBox status, no specials.
 * @param   pVM         VM handle.
 * @param   GCPhysCR3   The physical address in the CR3 register.
 */
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
    Assert(!pVM->pgm.s.fMappingsFixed);
    int rc = VINF_SUCCESS;

    /*
     * Register/Modify write phys handler for guest's CR3 if it changed.
     */
#if PGM_GST_TYPE == PGM_TYPE_32BIT

    if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    {
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
        const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;
        if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
            rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
        else
            rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
                                              pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,
                                              pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,
                                              pVM->pgm.s.pfnGCGstWriteHandlerCR3, 0,
                                              pVM->pgm.s.pszR3GstWriteHandlerCR3);
# else  /* PGMPOOL_WITH_MIXED_PT_CR3 */
        rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
                                         pVM->pgm.s.enmShadowMode == PGMMODE_PAE
                                      || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
                                      ? PGMPOOL_IDX_PAE_PD
                                      : PGMPOOL_IDX_PD,
                                      GCPhysCR3);
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
        if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
                             rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
            return rc;
        }
        pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
    }

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    /*
     * Register/Modify write phys handler for the guest's CR3 (the PDPT page) if it changed.
     */
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
    AssertFailed();
# endif
    if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    {
        rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
        if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
                             rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
            return rc;
        }
        pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
    }

    /*
     * Do the 4 PDs.
     */
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        if (CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].n.u1Present)
        {
            RTGCPHYS GCPhys = CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u & X86_PDPE_PG_MASK;
            if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
            {
                Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);

                rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
            }

            if (VBOX_FAILURE(rc))
            {
                AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
                                 rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));
                return rc;
            }
            pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
        }
        else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
        {
            rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
            AssertRC(rc);
            pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
        }
    }

#else
    /* prot/real/amd64 mode stub */

#endif
    return rc;
}

/**
 * Deregisters any physical page monitors installed by MonitorCR3.
 *
 * @returns VBox status code, no specials.
 * @param   pVM         The VM handle.
 */
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
{
    int rc = VINF_SUCCESS;

    /*
     * Deregister the access handlers.
     *
     * PGMSyncCR3 will reinstall them if required, and it will be executed
     * before we enter GC again.
     */
#if PGM_GST_TYPE == PGM_TYPE_32BIT
    if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    {
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
        rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);
        AssertRCReturn(rc, rc);
# else  /* PGMPOOL_WITH_MIXED_PT_CR3 */
        rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool),
                                           pVM->pgm.s.enmShadowMode == PGMMODE_PAE
                                        || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
                                        ? PGMPOOL_IDX_PAE_PD
                                        : PGMPOOL_IDX_PD);
        AssertRCReturn(rc, rc);
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
        pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
    }

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    /* The PDPT page */
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
    AssertFailed();
# endif

    if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    {
        rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PDPT);
        AssertRC(rc);
    }

    /* The 4 PDs. */
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
        {
            Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
            int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTXSUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
            AssertRC(rc2);
            if (VBOX_FAILURE(rc2))
                rc = rc2;
            pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
        }
    }
#else
    /* prot/real/amd64 mode stub */
#endif
    return rc;
}

#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
/**
 * Updates one virtual handler range.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to a PGMVHUARGS structure (see PGM.cpp).
 */
static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur   = (PPGMVIRTHANDLER)pNode;
    PPGMHVUSTATE    pState = (PPGMHVUSTATE)pvUser;
    Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);

#if PGM_GST_TYPE == PGM_TYPE_32BIT
    PX86PD          pPDSrc = pState->pVM->pgm.s.CTXSUFF(pGuestPD);
#endif

    RTGCUINTPTR     GCPtr = (RTGCUINTPTR)pCur->GCPtr;
#if PGM_GST_TYPE != PGM_TYPE_AMD64
    /* skip all stuff above 4GB if not AMD64 mode. */
    if (GCPtr >= _4GB)
        return 0;
#endif

    unsigned        offPage = GCPtr & PAGE_OFFSET_MASK;
    unsigned        iPage = 0;
    while (iPage < pCur->cPages)
    {
#if PGM_GST_TYPE == PGM_TYPE_32BIT
        X86PDE      Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
#elif PGM_GST_TYPE == PGM_TYPE_PAE
        X86PDEPAE   Pde;
        Pde.u = pgmGstGetPaePDE(&pState->pVM->pgm.s, GCPtr);
#elif PGM_GST_TYPE == PGM_TYPE_AMD64
        X86PDEPAE   Pde;
        Pde.u = pgmGstGetLongModePDE(&pState->pVM->pgm.s, GCPtr);
#endif
        if (Pde.n.u1Present)
        {
            if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
                ||  !(pState->cr4 & X86_CR4_PSE)
# endif
               )
            {
                /*
                 * Normal page table.
                 */
                PGSTPT pPT;
                int rc = PGM_GCPHYS_2_PTR(pState->pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
                if (VBOX_SUCCESS(rc))
                {
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                    {
                        GSTPTE      Pte = pPT->a[iPTE];
                        RTGCPHYS    GCPhysNew;
                        if (Pte.n.u1Present)
                            GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
                        else
                            GCPhysNew = NIL_RTGCPHYS;
                        if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                        {
                            if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                                pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%VGp\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
                else
                {
                    /* not-present. */
                    offPage = 0;
                    AssertRC(rc);
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                        {
                            pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
            }
            else
            {
                /*
                 * 2/4MB page.
                 */
                RTGCPHYS GCPhys = (RTGCPHYS)(Pde.u & GST_PDE_PG_MASK);
                for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                     i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
                     i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                {
                    RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
                    if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                            pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                        AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                         ("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%VGp\n",
                                          pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                          pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                        pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                        pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                    }
                }
            } /* pde type */
        }
        else
        {
            /* not-present. */
            for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
                 cPages && iPage < pCur->cPages;
                 iPage++, GCPtr += PAGE_SIZE)
            {
                if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                {
                    pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
                    pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                    pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                }
            }
            offPage = 0;
        }
    } /* for pages in virtual mapping. */

    return 0;
}
#endif /* 32BIT, PAE and AMD64 */


/**
 * Updates the virtual page access handlers.
 *
 * @returns true if bits were flushed.
 * @returns false if bits weren't flushed.
 * @param   pVM     VM handle.
 * @param   cr4     The cr4 register value.
 */
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    /** @todo
     * In theory this is not sufficient: the guest can change a single page in a range with invlpg
     */

    /*
     * Resolve any virtual address based access handlers to GC physical addresses.
     * This should be fairly quick.
     */
    PGMHVUSTATE State;

    pgmLock(pVM);
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualUpdate), a);
    State.pVM   = pVM;
    State.fTodo = pVM->pgm.s.fSyncFlags;
    State.cr4   = cr4;
    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualUpdate), a);


    /*
     * Set / reset bits?
     */
    if (State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
    {
        STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualReset), b);
        Log(("pgmR3VirtualHandlersUpdate: resets bits\n"));
        RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
        STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3HandlerVirtualReset), b);
    }
    pgmUnlock(pVM);

    return !!(State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);

#else /* real / protected */
    return false;
#endif
}


#if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)

/**
 * Write access handler for the Guest CR3 page in 32-bit mode.
 *
 * This will try to interpret the instruction; on failure it falls back to the
 * recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc) && cb)
    {
        /*
         * Check if the modified PDEs are present and mappings.
         */
        const RTGCUINTPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
        const unsigned    iPD1  = offPD / sizeof(X86PDE);
        const unsigned    iPD2  = (offPD + cb - 1) / sizeof(X86PDE);
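        /* Worked example (illustrative note): X86PDE is 4 bytes, so an 8-byte
         * write at offPD 0x14 gives iPD1 = 0x14/4 = 5 and
         * iPD2 = (0x14+8-1)/4 = 6, i.e. the write straddles two PDEs.
         */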
986
987 Assert(cb > 0 && cb <= 8);
988 Assert(iPD1 < RT_ELEMENTS(pVM->pgm.s.CTXSUFF(pGuestPD)->a)); /// @todo R3/R0 separation.
989 Assert(iPD2 < RT_ELEMENTS(pVM->pgm.s.CTXSUFF(pGuestPD)->a));
990
991#ifdef DEBUG
992 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD1, iPD1 << X86_PD_SHIFT));
993 if (iPD1 != iPD2)
994 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD2, iPD2 << X86_PD_SHIFT));
995#endif
996
997 if (!pVM->pgm.s.fMappingsFixed)
998 {
999 PX86PD pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
1000 if ( ( pPDSrc->a[iPD1].n.u1Present
1001 && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
1002 || ( iPD1 != iPD2
1003 && pPDSrc->a[iPD2].n.u1Present
1004 && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
1005 )
1006 {
1007 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteConflict);
1008 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1009 if (rc == VINF_SUCCESS)
1010 rc = VINF_PGM_SYNC_CR3;
1011 Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
1012 return rc;
1013 }
1014 }
1015
1016 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
1017 }
1018 else
1019 {
1020 Assert(VBOX_FAILURE(rc));
1021 if (rc == VERR_EM_INTERPRETER)
1022 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
1023 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
1024 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
1025 }
1026 return rc;
1027}
1028
1029#endif /* PGM_TYPE_32BIT && !IN_RING3 */


#if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)

/**
 * Write access handler for the Guest CR3 page in PAE mode.
 *
 * This will try to interpret the instruction; on failure it falls back to the
 * recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc) && cb)
    {
        /*
         * Check if any of the PDs have changed.
         * We'll simply check all of them instead of figuring out which one/two to check.
         */
        for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
        {
            if (    CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].n.u1Present
                &&      (CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u & X86_PDPE_PG_MASK)
                    !=  pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
            {
                /*
                 * The PDPE has changed.
                 * We will schedule a monitoring update for the next TLB Flush,
                 * InvalidatePage or SyncCR3.
                 *
                 * This isn't perfect, because a lazy page sync might be dealing with a half
                 * updated PDPE. However, we assume that the guest OS is disabling interrupts
                 * and being extremely careful (cmpxchg8b) when updating a PDPE where it's
                 * executing.
                 */
                pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
                Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%VGp\n",
                     i, CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
            }
        }

        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
    }
    else
    {
        Assert(VBOX_FAILURE(rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
    }
    Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
    return rc;
}


/**
 * Write access handler for the Guest PDs in PAE mode.
 *
 * This will try to interpret the instruction; on failure it falls back to the
 * recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc) && cb)
    {
        /*
         * Figure out which of the 4 PDs this is.
         */
        RTGCUINTPTR i;
        for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
            if (CTXSUFF(pVM->pgm.s.pGstPaePDPT)->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
            {
                PX86PDPAE         pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
                const RTGCUINTPTR offPD  = GCPhysFault & PAGE_OFFSET_MASK;
                const unsigned    iPD1   = offPD / sizeof(X86PDEPAE);
                const unsigned    iPD2   = (offPD + cb - 1) / sizeof(X86PDEPAE);

                Assert(cb > 0 && cb <= 8);
                Assert(iPD1 < X86_PG_PAE_ENTRIES);
                Assert(iPD2 < X86_PG_PAE_ENTRIES);

#ifdef DEBUG
                Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%VGv)\n",
                     i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
                if (iPD1 != iPD2)
                    Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%VGv)\n",
                         i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
#endif

                if (!pVM->pgm.s.fMappingsFixed)
                {
                    if (    (   pPDSrc->a[iPD1].n.u1Present
                             && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
                        ||  (   iPD1 != iPD2
                             && pPDSrc->a[iPD2].n.u1Present
                             && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
                       )
                    {
                        Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
                        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteConflict);
                        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                        return VINF_PGM_SYNC_CR3;
                    }
                }
                break; /* ASSUMES no duplicate entries... */
            }
        Assert(i < 4);

        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
    }
    else
    {
        Assert(VBOX_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
        else
            Log(("pgmXXGstPaeWriteHandlerPD: returns %Rrc\n", rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
    }
    return rc;
}

#endif /* PGM_TYPE_PAE && !IN_RING3 */