VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@22349

Last change on this file since 22349 was 20671, checked in by vboxsync, 15 years ago

Bigger lock for the page-fault handler.
Avoid deadlocks when syncing notification handlers with our recompiler.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 9.8 KB
/* $Id: PGMR0.cpp 20671 2009-06-17 15:23:14Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>

RT_C_DECLS_BEGIN
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

RT_C_DECLS_END

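/*
 * Illustrative aside: the block above is an include-template pattern --
 * PGMR0Bth.h is preprocessed four times, and each pass expands PGM_BTH_NAME
 * differently, stamping out one set of both-mode worker functions (e.g.
 * Trap0eHandler) per shadow paging mode. Below is a minimal standalone sketch
 * of the same technique; impl.h, NAME, fast_add and slow_add are invented
 * names for the example, not part of this file.
 */
#if 0 /* illustration only, never compiled */
/* impl.h -- the "template": one definition per inclusion. */
int NAME(add)(int a, int b) { return a + b; }

/* consumer.c -- instantiate the template twice under different names. */
#define NAME(fn) fast_ ## fn
#include "impl.h"                   /* emits: int fast_add(int, int) */
#undef  NAME
#define NAME(fn) slow_ ## fn
#include "impl.h"                   /* emits: int slow_add(int, int) */
#undef  NAME
#endif
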
/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu->idCpu));

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_INTERNAL_ERROR);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (    (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                 || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            &&  iFirst < PGM_HANDY_PAGES_MIN)
        {
#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
            do
            {
                cPages >>= 2;   /* retry with a quarter of the previous amount */
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }

    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}

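/*
 * Illustrative aside: a minimal sketch of how a ring-0 caller might use the
 * worker above, assuming the documented contract (PGM critical section held;
 * the caller clears the new pages). exampleEnsureHandyPages and the
 * VINF_EM_RAW_TO_R3 fallback are hypothetical, not the actual call sites.
 */
#if 0 /* illustration only, never compiled */
static int exampleEnsureHandyPages(PVM pVM, PVMCPU pVCpu)
{
    pgmLock(pVM);                   /* the worker asserts critsect ownership */
    int rc = PGMR0PhysAllocateHandyPages(pVM, pVCpu);
    pgmUnlock(pVM);
    if (rc == VERR_GMM_SEED_ME)
        rc = VINF_EM_RAW_TO_R3;     /* assumed: let ring-3 seed GMM with pages */
    /* On other failures the worker has already set VM_FF_PGM_NO_MEMORY. */
    return rc;
}
#endif
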
/**
 * #PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 VM Handle.
 * @param   pVCpu               VMCPU Handle.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   pvFault             The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault)
{
    int rc;

    LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%RGp eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT, ("enmShwPagingMode=%d\n", enmShwPagingMode));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * We pretend the guest is in protected mode without paging, so we can use
     * existing code to build the nested page tables.
     */
    pgmLock(pVM);
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    Assert(PGMIsLockOwner(pVM));
    pgmUnlock(pVM);
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                    pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}

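/*
 * Illustrative aside: a minimal sketch of how the handler above might be
 * reached from the hardware-assisted execution exit path on a nested-paging
 * fault (EPT violation / AMD-V nested #PF). exampleNestedFaultExit and its
 * parameter list are invented for the example; the real call sites live in
 * the ring-0 hardware-virtualization code.
 */
#if 0 /* illustration only, never compiled */
static int exampleNestedFaultExit(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore,
                                  RTGCUINT uErrCode, RTGCPHYS GCPhysFault)
{
    /* EPT assumed here; with AMD-V pass the host paging mode instead. */
    int rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT,
                                            uErrCode, pCtxCore, GCPhysFault);
    if (rc == VINF_SUCCESS)
        return VINF_SUCCESS;        /* fault resolved, resume guest execution */
    return rc;                      /* anything else bubbles back to the VMM */
}
#endif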