/* $Id: PGMAll.cpp 17134 2009-02-25 15:10:11Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/stam.h>
#include <VBox/csam.h>
#include <VBox/patm.h>
#include <VBox/trpm.h>
#include <VBox/rem.h>
#include <VBox/em.h>
#include <VBox/hwaccm.h>
#include <VBox/hwacc_vmx.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** The VM handle. */
    PVM         pVM;
    /** The todo flags. */
    RTUINT      fTodo;
    /** The CR4 register value. */
    uint32_t    cr4;
} PGMHVUSTATE, *PPGMHVUSTATE;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
#endif

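/*
 * Note: the blocks below instantiate the paging "templates". PGMAllShw.h,
 * PGMAllGst.h and PGMAllBth.h are included once per shadow/guest mode
 * combination; PGM_SHW_TYPE/PGM_GST_TYPE select the code paths while
 * PGM_SHW_NAME/PGM_GST_NAME/PGM_BTH_NAME compose unique function names for
 * each instantiation. As an illustrative (not literal) expansion, in the
 * 32-bit shadow + 32-bit guest instantiation
 *      PGM_BTH_NAME(Trap0eHandler)
 * becomes something like
 *      pgmBth32Bit32BitTrap0eHandler
 * which is later reached through the mode specific function tables
 * (see the PGM_BTH_PFN dispatch further down).
 */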
/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_32BIT
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_32BIT_PD
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_PAE
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT_FOR_32BIT
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE                PGM_TYPE_PAE
#define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
/*
 * Shadow - AMD64 mode
 */
# define PGM_SHW_TYPE               PGM_TYPE_AMD64
# define PGM_SHW_NAME(name)         PGM_SHW_NAME_AMD64(name)
# include "PGMAllShw.h"

/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
# define PGM_GST_TYPE               PGM_TYPE_PROT
# define PGM_GST_NAME(name)         PGM_GST_NAME_PROT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_AMD64_PROT(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_PAE_PT_FOR_PHYS
# define BTH_PGMPOOLKIND_ROOT       PGMPOOLKIND_PAE_PD_PHYS
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef BTH_PGMPOOLKIND_ROOT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
#  define PGM_GST_TYPE              PGM_TYPE_AMD64
#  define PGM_GST_NAME(name)        PGM_GST_NAME_AMD64(name)
#  define PGM_BTH_NAME(name)        PGM_BTH_NAME_AMD64_AMD64(name)
#  define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#  define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#  define BTH_PGMPOOLKIND_ROOT      PGMPOOLKIND_64BIT_PML4
#  include "PGMAllGst.h"
#  include "PGMAllBth.h"
#  undef BTH_PGMPOOLKIND_PT_FOR_BIG
#  undef BTH_PGMPOOLKIND_PT_FOR_PT
#  undef BTH_PGMPOOLKIND_ROOT
#  undef PGM_BTH_NAME
#  undef PGM_GST_TYPE
#  undef PGM_GST_NAME
# endif /* VBOX_WITH_64_BITS_GUESTS */

# undef PGM_SHW_TYPE
# undef PGM_SHW_NAME


/*
 * Shadow - Nested paging mode
 */
# define PGM_SHW_TYPE               PGM_TYPE_NESTED
# define PGM_SHW_NAME(name)         PGM_SHW_NAME_NESTED(name)
# include "PGMAllShw.h"

/* Guest - real mode */
# define PGM_GST_TYPE               PGM_TYPE_REAL
# define PGM_GST_NAME(name)         PGM_GST_NAME_REAL(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_NESTED_REAL(name)
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - protected mode */
# define PGM_GST_TYPE               PGM_TYPE_PROT
# define PGM_GST_NAME(name)         PGM_GST_NAME_PROT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_NESTED_PROT(name)
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - 32-bit mode */
# define PGM_GST_TYPE               PGM_TYPE_32BIT
# define PGM_GST_NAME(name)         PGM_GST_NAME_32BIT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_NESTED_32BIT(name)
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - PAE mode */
# define PGM_GST_TYPE               PGM_TYPE_PAE
# define PGM_GST_NAME(name)         PGM_GST_NAME_PAE(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_NESTED_PAE(name)
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
#  define PGM_GST_TYPE              PGM_TYPE_AMD64
#  define PGM_GST_NAME(name)        PGM_GST_NAME_AMD64(name)
#  define PGM_BTH_NAME(name)        PGM_BTH_NAME_NESTED_AMD64(name)
#  include "PGMAllBth.h"
#  undef PGM_BTH_NAME
#  undef PGM_GST_TYPE
#  undef PGM_GST_NAME
# endif /* VBOX_WITH_64_BITS_GUESTS */

# undef PGM_SHW_TYPE
# undef PGM_SHW_NAME


/*
 * Shadow - EPT
 */
# define PGM_SHW_TYPE               PGM_TYPE_EPT
# define PGM_SHW_NAME(name)         PGM_SHW_NAME_EPT(name)
# include "PGMAllShw.h"

/* Guest - real mode */
# define PGM_GST_TYPE               PGM_TYPE_REAL
# define PGM_GST_NAME(name)         PGM_GST_NAME_REAL(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_REAL(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - protected mode */
# define PGM_GST_TYPE               PGM_TYPE_PROT
# define PGM_GST_NAME(name)         PGM_GST_NAME_PROT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_PROT(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - 32-bit mode */
# define PGM_GST_TYPE               PGM_TYPE_32BIT
# define PGM_GST_NAME(name)         PGM_GST_NAME_32BIT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_32BIT(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - PAE mode */
# define PGM_GST_TYPE               PGM_TYPE_PAE
# define PGM_GST_NAME(name)         PGM_GST_NAME_PAE(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_PAE(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
#  define PGM_GST_TYPE              PGM_TYPE_AMD64
#  define PGM_GST_NAME(name)        PGM_GST_NAME_AMD64(name)
#  define PGM_BTH_NAME(name)        PGM_BTH_NAME_EPT_AMD64(name)
#  define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#  include "PGMAllBth.h"
#  undef BTH_PGMPOOLKIND_PT_FOR_PT
#  undef PGM_BTH_NAME
#  undef PGM_GST_TYPE
#  undef PGM_GST_NAME
# endif /* VBOX_WITH_64_BITS_GUESTS */

# undef PGM_SHW_TYPE
# undef PGM_SHW_NAME

#endif /* !IN_RC */


#ifndef IN_RING3
/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
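    /* Reminder: the standard x86 #PF error code bits decoded below are
       P (bit 0) - protection violation vs. not-present, RW (bit 1) - write
       access, US (bit 2) - fault while in user mode, RSVD (bit 3) - reserved
       bits set in a paging entry, and ID (bit 4) - instruction fetch (NX). */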
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
    }
#endif /* VBOX_WITH_STATISTICS */

    /*
     * Call the worker.
     */
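    /* PGM_BTH_PFN dispatches through the per-VM table of both-mode (shadow +
       guest) workers, i.e. the Trap0eHandler instance generated by the
       template instantiations at the top of this file for the current mode. */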
    int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF); });
    STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                     pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}
#endif /* !IN_RING3 */


/**
 * Prefetch a page
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 */
VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
    int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
    return rc;
}


/**
 * Gets the mapping corresponding to the specified address (if any).
 *
 * @returns Pointer to the mapping.
 * @returns NULL if not found.
 *
 * @param   pVM     The virtual machine.
 * @param   GCPtr   The guest context pointer.
 */
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
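    /* The mappings list is sorted by ascending GCPtr, so the walk can stop as
       soon as we have passed the address we are looking for. */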
    while (pMapping)
    {
        if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
            break;
        if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
            return pMapping;
        pMapping = pMapping->CTX_SUFF(pNext);
    }
    return NULL;
}


/**
 * Verifies a range of pages for read or write access
 *
 * Only checks the guest's page tables
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 * @remarks Currently not in use.
 */
VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPage;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    bool fWrite = !!(fAccess & X86_PTE_RW);
    bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPage & X86_PTE_P)
        || (fWrite && !(fPage & X86_PTE_RW))
        || (fUser  && !(fPage & X86_PTE_US)) )
    {
        Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }
    if (   RT_SUCCESS(rc)
        && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
        return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    return rc;
}


/**
 * Verifies a range of pages for read or write access
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 */
VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));

    /*
     * Get going.
     */
    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPageGst & X86_PTE_P)
        || (fWrite && !(fPageGst & X86_PTE_RW))
        || (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    if (!HWACCMIsNestedPagingActive(pVM))
    {
        /*
         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
         */
        rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
        if (    rc == VERR_PAGE_NOT_PRESENT
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            /*
             * Page is not present in our page tables.
             * Try to sync it!
             */
            Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
            uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
            rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
            if (rc != VINF_SUCCESS)
                return rc;
        }
        else
            AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
    }

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /** @note this will assert when writing to monitored pages (a bit annoying actually) */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
    if (    (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
        ||  (fWrite && !(fPageShw & X86_PTE_RW))
        ||  (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    if (   RT_SUCCESS(rc)
        && (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
            || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}
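
/* Illustrative use of PGMVerifyAccess (hypothetical caller, not code found
 * elsewhere in the tree): check that the guest may write a 16 byte structure
 * from user mode before we touch it on its behalf:
 *      rc = PGMVerifyAccess(pVM, GCPtrStruct, 16, X86_PTE_RW | X86_PTE_US);
 *      if (rc != VINF_SUCCESS)
 *          reflect the fault to the guest / go back to ring-3.
 */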


/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns VBox status code, special care required.
 * @retval  VINF_PGM_SYNC_CR3 - handled.
 * @retval  VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
 * @retval  VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
 *
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 *
 * @remark  ASSUMES the page table entry or page directory is valid. Fairly
 *          safe, but there could be edge cases!
 *
 * @todo    Flush page or page directory only if necessary!
 */
VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    int rc;
    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     * Failure happens when it's out of space. We'll return to HC in that case.
     */
    rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
    if (rc != VINF_SUCCESS)
        return rc;
#endif /* !IN_RING3 */


#ifdef IN_RC
    /*
     * Check for conflicts and pending CR3 monitoring updates.
     */
    if (!pVM->pgm.s.fMappingsFixed)
    {
        if (    pgmGetMapping(pVM, GCPtrPage)
            &&  PGMGstGetPage(pVM, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
        {
            LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
            return VINF_PGM_SYNC_CR3;
        }

        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
            STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
    }
#endif /* IN_RC */

    /*
     * Call paging mode specific worker.
     */
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
    rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);

#ifdef IN_RING3
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    RT_SUCCESS(rc)
        &&  (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
        rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
#endif
    }

    /*
     * Inform CSAM about the flush
     *
     * Note: This is to check if monitored pages have been changed; when we implement
     *       callbacks for virtual handlers, this is no longer required.
     */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif /* IN_RING3 */
    return rc;
}


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM handle.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    return PGM_SHW_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pHCPhys);
}


/**
 * Sets (replaces) the page flags for a range of pages in the shadow context.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 * @remark  You must use PGMMapSetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb     += GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */

    /*
     * Call worker.
     */
    return PGM_SHW_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
}
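
/* Example (illustrative): write protect one page in the shadow tables by
 * clearing X86_PTE_RW via the AND mask. Note the 64-bit cast - ~X86_PTE_RW
 * evaluated as a 32-bit constant would truncate the mask to 32 bits, which
 * is exactly the pitfall the fMask doc comment warns about:
 *      rc = PGMShwModifyPage(pVM, GCPtr, 1, 0, ~(uint64_t)X86_PTE_RW);
 */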


/**
 * Gets the SHADOW page directory pointer for the specified address.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 * @remarks Unused.
 */
DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
{
    PPGM            pPGM  = &pVM->pgm.s;
    PPGMPOOL        pPool = pPGM->CTX_SUFF(pPool);
    PPGMPOOLPAGE    pShwPage;

    Assert(!HWACCMIsNestedPagingActive(pVM));

    const unsigned  iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT        pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    PX86PDPE        pPdpe = &pPdpt->a[iPdPt];

    *ppPdpt = pPdpt;
    if (!pPdpe->n.u1Present)
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;

    pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY

/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   pGstPdpe    Guest PDPT entry
 * @param   ppPD        Receives address of page directory
 */
int pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
{
    const unsigned  iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT        pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    PX86PDPE        pPdpe = &pPdpt->a[iPdPt];
    PPGMPOOL        pPool = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE    pShwPage;
    int             rc;

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        bool        fNestedPaging = HWACCMIsNestedPagingActive(pVM);
        bool        fPaging       = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
        RTGCPTR64   GCPdPt;
        PGMPOOLKIND enmKind;

        if (fNestedPaging || !fPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPdPt  = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
            enmKind = PGMPOOLKIND_PAE_PD_PHYS;
        }
        else
        {
            Assert(pGstPdpe);

            if (CPUMGetGuestCR4(pVM) & X86_CR4_PAE)
            {
                GCPdPt  = pGstPdpe->u & X86_PDPE_PG_MASK;
                enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
            }
            else
            {
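                /* A 32-bit guest PD maps the whole 4 GB address space on its
                   own, so it is shadowed by four PAE PDs (kinds PD0..PD3),
                   one per 1 GB quarter; iPdPt picks the quarter below. */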
                GCPdPt  = CPUMGetGuestCR3(pVM);
                enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
            }
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("pgmShwSyncPaePDPtr: PGM pool flushed -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PD was cached or created; hook it up now. */
    pPdpe->u |=    pShwPage->Core.Key
                | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}

/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns VBox status.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppShwPde    Receives the address of the pgm pool page for the shadow page directory
 */
DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
{
    const unsigned  iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT        pPdpt = pgmShwGetPaePDPTPtr(pPGM);
    AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT);   /* can't happen */
    if (!pPdpt->a[iPdPt].n.u1Present)
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, VERR_INTERNAL_ERROR);

    *ppShwPde = pShwPde;
    return VINF_SUCCESS;
}
#endif

#ifndef IN_RC

/**
 * Syncs the SHADOW page directory pointer for the specified address.
 *
 * Allocates backing pages in case the PDPT or PML4 entry is missing.
 *
 * The caller is responsible for making sure the guest has a valid PD before
 * calling this function.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   pGstPml4e   Guest PML4 entry
 * @param   pGstPdpe    Guest PDPT entry
 * @param   ppPD        Receives address of page directory
 */
int pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
{
    PPGM           pPGM          = &pVM->pgm.s;
    PPGMPOOL       pPool         = pPGM->CTX_SUFF(pPool);
    const unsigned iPml4         = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PX86PML4E      pPml4e        = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
    bool           fNestedPaging = HWACCMIsNestedPagingActive(pVM);
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    bool           fPaging       = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
#endif
    PPGMPOOLPAGE   pShwPage;
    int            rc;

    /* Allocate page directory pointer table if not present. */
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
    {
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        RTGCPTR64   GCPml4;
        PGMPOOLKIND enmKind;

        Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));

        if (fNestedPaging || !fPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPml4  = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
            enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
        }
        else
        {
            Assert(pGstPml4e && pGstPdpe);

            GCPml4  = pGstPml4e->u & X86_PML4E_PG_MASK;
            enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
#else
        if (!fNestedPaging)
        {
            Assert(pGstPml4e && pGstPdpe);
            Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));

            rc = pgmPoolAlloc(pVM, pGstPml4e->u & X86_PML4E_PG_MASK,
                              PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
        }
        else
        {
            /* AMD-V nested paging. (Intel EPT never comes here) */
            RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
            rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */,
                              PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
        }
#endif
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PDPT was cached or created; hook it up now. */
    pPml4e->u |=    pShwPage->Core.Key
                 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        RTGCPTR64   GCPdPt;
        PGMPOOLKIND enmKind;

        if (fNestedPaging || !fPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPdPt  = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
        }
        else
        {
            Assert(pGstPdpe);

            GCPdPt  = pGstPdpe->u & X86_PDPE_PG_MASK;
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
#else
        if (!fNestedPaging)
        {
            Assert(pGstPml4e && pGstPdpe);
            Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
            /* Create a reference back to the PDPT by using the index in its shadow page. */
            rc = pgmPoolAlloc(pVM, pGstPdpe->u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
        }
        else
        {
            /* AMD-V nested paging. (Intel EPT never comes here) */
            RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;

            rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
        }
#endif
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PD was cached or created; hook it up now. */
    pPdpe->u |=    pShwPage->Core.Key
                | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}


/**
 * Gets the SHADOW page directory pointer for the specified address (long mode).
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Receives address of the PML4 entry. (Optional, may be NULL.)
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 */
DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
{
    PPGM            pPGM   = &pVM->pgm.s;
    const unsigned  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E      pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
    AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
    if (ppPml4e)
        *ppPml4e = (PX86PML4E)pPml4e;
    if (!pPml4e->n.u1Present)
        return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;

    PPGMPOOL        pPool    = pPGM->CTX_SUFF(pPool);
    PPGMPOOLPAGE    pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    const unsigned  iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PCX86PDPT       pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    if (!pPdpt->a[iPdPt].n.u1Present)
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;

    pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}


/**
 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT or PML4 entry is missing.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 */
int pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
{
    PPGM            pPGM  = &pVM->pgm.s;
    const unsigned  iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
    PPGMPOOL        pPool = pPGM->CTX_SUFF(pPool);
    PEPTPML4        pPml4;
    PEPTPML4E       pPml4e;
    PPGMPOOLPAGE    pShwPage;
    int             rc;

    Assert(HWACCMIsNestedPagingActive(pVM));

# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysShwNestedRoot, &pPml4);
    AssertRCReturn(rc, rc);
# else
    pPml4 = (PEPTPML4)pPGM->CTX_SUFF(pShwNestedRoot);
# endif
    Assert(pPml4);

    /* Allocate page directory pointer table if not present. */
    pPml4e = &pPml4->a[iPml4];
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & EPT_PML4E_PG_MASK))
    {
        Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
        RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
#else
        rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
#endif
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PDPT was cached or created; hook it up now and fill with the default value. */
    pPml4e->u           = pShwPage->Core.Key;
    pPml4e->n.u1Present = 1;
    pPml4e->n.u1Write   = 1;
    pPml4e->n.u1Execute = 1;

    const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
    PEPTPDPT  pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];

    if (ppPdpt)
        *ppPdpt = pPdpt;

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & EPT_PDPTE_PG_MASK))
    {
        RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
#else
        rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
#endif
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PD was cached or created; hook it up now and fill with the default value. */
    pPdpe->u            = pShwPage->Core.Key;
    pPdpe->n.u1Present  = 1;
    pPdpe->n.u1Write    = 1;
    pPdpe->n.u1Execute  = 1;

    *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}

#endif /* !IN_RC */

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal pages becomes
 * necessary at a later point, a dedicated API will be created for that purpose.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
1274 | */
|
---|
1275 | VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
|
---|
1276 | {
|
---|
1277 | return PGM_GST_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pGCPhys);
|
---|
1278 | }
|
---|
1279 |
|
---|
1280 |
|
---|
1281 | /**
|
---|
1282 | * Checks if the page is present.
|
---|
1283 | *
|
---|
1284 | * @returns true if the page is present.
|
---|
1285 | * @returns false if the page is not present.
|
---|
1286 | * @param pVM The VM handle.
|
---|
1287 | * @param GCPtr Address within the page.
|
---|
1288 | */
|
---|
1289 | VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
|
---|
1290 | {
|
---|
1291 | int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
|
---|
1292 | return RT_SUCCESS(rc);
|
---|
1293 | }
|
---|
1294 |
|
---|
1295 |
|
---|
1296 | /**
|
---|
1297 | * Sets (replaces) the page flags for a range of pages in the guest's tables.
|
---|
1298 | *
|
---|
1299 | * @returns VBox status.
|
---|
1300 | * @param pVM VM handle.
|
---|
1301 | * @param GCPtr The address of the first page.
|
---|
1302 | * @param cb The size of the range in bytes.
|
---|
1303 | * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
|
---|
1304 | */
|
---|
1305 | VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
|
---|
1306 | {
|
---|
1307 | return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
|
---|
1308 | }
|
---|
1309 |
|
---|
1310 |
|
---|
1311 | /**
|
---|
1312 | * Modify page flags for a range of pages in the guest's tables
|
---|
1313 | *
|
---|
1314 | * The existing flags are ANDed with the fMask and ORed with the fFlags.
|
---|
1315 | *
|
---|
1316 | * @returns VBox status code.
|
---|
1317 | * @param pVM VM handle.
|
---|
1318 | * @param GCPtr Virtual address of the first page in the range.
|
---|
1319 | * @param cb Size (in bytes) of the range to apply the modification to.
|
---|
1320 | * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
|
---|
1321 | * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
|
---|
1322 | * Be very CAREFUL when ~'ing constants which could be 32-bit!
|
---|
1323 | */
|
---|
1324 | VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
|
---|
1325 | {
|
---|
1326 | STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
|
---|
1327 |
|
---|
1328 | /*
|
---|
1329 | * Validate input.
|
---|
1330 | */
|
---|
1331 | AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
|
---|
1332 | Assert(cb);
|
---|
1333 |
|
---|
1334 | LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
|
---|
1335 |
|
---|
1336 | /*
|
---|
1337 | * Adjust input.
|
---|
1338 | */
|
---|
1339 | cb += GCPtr & PAGE_OFFSET_MASK;
|
---|
1340 | cb = RT_ALIGN_Z(cb, PAGE_SIZE);
|
---|
1341 | GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
|
---|
1342 |
|
---|
1343 | /*
|
---|
1344 | * Call worker.
|
---|
1345 | */
|
---|
1346 | int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
|
---|
1347 |
|
---|
1348 | STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
|
---|
1349 | return rc;
|
---|
1350 | }
|
---|
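/* Illustrative sketch of the fFlags/fMask semantics (not from the original
 * source): to write protect a range, AND out X86_PTE_RW while OR-ing in
 * nothing. Note the 64-bit cast before ~, per the fMask caution above:
 *
 *     rc = PGMGstModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *
 * PGMGstSetPage above is the degenerate case with fMask = 0, so the supplied
 * flags replace the existing ones entirely.
 */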
1351 |
|
---|
1352 |
|
---|
1353 | /**
|
---|
1354 | * Gets the specified page directory pointer table entry.
|
---|
1355 | *
|
---|
1356 | * @returns PDP entry
|
---|
1357 | * @param pVM The VM handle.
|
---|
1358 | * @param iPdpt PDPT index
|
---|
1359 | */
|
---|
1360 | VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVM pVM, unsigned iPdpt)
|
---|
1361 | {
|
---|
1362 | Assert(iPdpt <= 3);
|
---|
1363 | return pgmGstGetPaePDPTPtr(&pVM->pgm.s)->a[iPdpt & 3];
|
---|
1364 | }
|
---|
1365 |
|
---|
1366 |
|
---|
1367 | /**
|
---|
1368 | * Gets the current CR3 register value for the shadow memory context.
|
---|
1369 | * @returns CR3 value.
|
---|
1370 | * @param pVM The VM handle.
|
---|
1371 | */
|
---|
1372 | VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
|
---|
1373 | {
|
---|
1374 | #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
1375 | PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
|
---|
1376 | switch (enmShadowMode)
|
---|
1377 | {
|
---|
1378 | case PGMMODE_EPT:
|
---|
1379 | return pVM->pgm.s.HCPhysShwNestedRoot;
|
---|
1380 |
|
---|
1381 | default:
|
---|
1382 | return pVM->pgm.s.HCPhysShwCR3;
|
---|
1383 | }
|
---|
1384 | #else
|
---|
1385 | PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
|
---|
1386 | switch (enmShadowMode)
|
---|
1387 | {
|
---|
1388 | case PGMMODE_32_BIT:
|
---|
1389 | return pVM->pgm.s.HCPhysShw32BitPD;
|
---|
1390 |
|
---|
1391 | case PGMMODE_PAE:
|
---|
1392 | case PGMMODE_PAE_NX:
|
---|
1393 | return pVM->pgm.s.HCPhysShwPaePdpt;
|
---|
1394 |
|
---|
1395 | case PGMMODE_AMD64:
|
---|
1396 | case PGMMODE_AMD64_NX:
|
---|
1397 | return pVM->pgm.s.HCPhysShwCR3;
|
---|
1398 |
|
---|
1399 | case PGMMODE_EPT:
|
---|
1400 | return pVM->pgm.s.HCPhysShwNestedRoot;
|
---|
1401 |
|
---|
1402 | case PGMMODE_NESTED:
|
---|
1403 | return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
|
---|
1404 |
|
---|
1405 | default:
|
---|
1406 | AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
|
---|
1407 | return ~0;
|
---|
1408 | }
|
---|
1409 | #endif
|
---|
1410 | }
|
---|
1411 |
|
---|
1412 |
|
---|
1413 | /**
|
---|
1414 | * Gets the current CR3 register value for the nested memory context.
|
---|
1415 | * @returns CR3 value.
|
---|
1416 | * @param pVM The VM handle.
 | * @param enmShadowMode The shadow paging mode.
|
---|
1417 | */
|
---|
1418 | VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
|
---|
1419 | {
|
---|
1420 | #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
1421 | return pVM->pgm.s.HCPhysShwCR3;
|
---|
1422 | #else
|
---|
1423 | switch (enmShadowMode)
|
---|
1424 | {
|
---|
1425 | case PGMMODE_32_BIT:
|
---|
1426 | return pVM->pgm.s.HCPhysShw32BitPD;
|
---|
1427 |
|
---|
1428 | case PGMMODE_PAE:
|
---|
1429 | case PGMMODE_PAE_NX:
|
---|
1430 | return pVM->pgm.s.HCPhysShwPaePdpt;
|
---|
1431 |
|
---|
1432 | case PGMMODE_AMD64:
|
---|
1433 | case PGMMODE_AMD64_NX:
|
---|
1434 | return pVM->pgm.s.HCPhysShwCR3;
|
---|
1435 |
|
---|
1436 | default:
|
---|
1437 | AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
|
---|
1438 | return ~0;
|
---|
1439 | }
|
---|
1440 | #endif
|
---|
1441 | }
|
---|
1442 |
|
---|
1443 |
|
---|
1444 | /**
|
---|
1445 | * Gets the current CR3 register value for the EPT paging memory context.
|
---|
1446 | * @returns CR3 value.
|
---|
1447 | * @param pVM The VM handle.
|
---|
1448 | */
|
---|
1449 | VMMDECL(RTHCPHYS) PGMGetEPTCR3(PVM pVM)
|
---|
1450 | {
|
---|
1451 | return pVM->pgm.s.HCPhysShwNestedRoot;
|
---|
1452 | }
|
---|
1453 |
|
---|
1454 |
|
---|
1455 | /**
|
---|
1456 | * Gets the CR3 register value for the 32-Bit shadow memory context.
|
---|
1457 | * @returns CR3 value.
|
---|
1458 | * @param pVM The VM handle.
|
---|
1459 | */
|
---|
1460 | VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
|
---|
1461 | {
|
---|
1462 | #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
1463 | return pVM->pgm.s.HCPhysShwCR3;
|
---|
1464 | #else
|
---|
1465 | return pVM->pgm.s.HCPhysShw32BitPD;
|
---|
1466 | #endif
|
---|
1467 | }
|
---|
1468 |
|
---|
1469 |
|
---|
1470 | /**
|
---|
1471 | * Gets the CR3 register value for the PAE shadow memory context.
|
---|
1472 | * @returns CR3 value.
|
---|
1473 | * @param pVM The VM handle.
|
---|
1474 | */
|
---|
1475 | VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
|
---|
1476 | {
|
---|
1477 | #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
1478 | return pVM->pgm.s.HCPhysShwCR3;
|
---|
1479 | #else
|
---|
1480 | return pVM->pgm.s.HCPhysShwPaePdpt;
|
---|
1481 | #endif
|
---|
1482 | }
|
---|
1483 |
|
---|
1484 |
|
---|
1485 | /**
|
---|
1486 | * Gets the CR3 register value for the AMD64 shadow memory context.
|
---|
1487 | * @returns CR3 value.
|
---|
1488 | * @param pVM The VM handle.
|
---|
1489 | */
|
---|
1490 | VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
|
---|
1491 | {
|
---|
1492 | return pVM->pgm.s.HCPhysShwCR3;
|
---|
1493 | }
|
---|
1494 |
|
---|
1495 | /**
|
---|
1496 | * Gets the current CR3 register value for the HC intermediate memory context.
|
---|
1497 | * @returns CR3 value.
|
---|
1498 | * @param pVM The VM handle.
|
---|
1499 | */
|
---|
1500 | VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
|
---|
1501 | {
|
---|
1502 | switch (pVM->pgm.s.enmHostMode)
|
---|
1503 | {
|
---|
1504 | case SUPPAGINGMODE_32_BIT:
|
---|
1505 | case SUPPAGINGMODE_32_BIT_GLOBAL:
|
---|
1506 | return pVM->pgm.s.HCPhysInterPD;
|
---|
1507 |
|
---|
1508 | case SUPPAGINGMODE_PAE:
|
---|
1509 | case SUPPAGINGMODE_PAE_GLOBAL:
|
---|
1510 | case SUPPAGINGMODE_PAE_NX:
|
---|
1511 | case SUPPAGINGMODE_PAE_GLOBAL_NX:
|
---|
1512 | return pVM->pgm.s.HCPhysInterPaePDPT;
|
---|
1513 |
|
---|
1514 | case SUPPAGINGMODE_AMD64:
|
---|
1515 | case SUPPAGINGMODE_AMD64_GLOBAL:
|
---|
1516 | case SUPPAGINGMODE_AMD64_NX:
|
---|
1517 | case SUPPAGINGMODE_AMD64_GLOBAL_NX:
|
---|
1518 | return pVM->pgm.s.HCPhysInterPaePDPT;
|
---|
1519 |
|
---|
1520 | default:
|
---|
1521 | AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
|
---|
1522 | return ~0;
|
---|
1523 | }
|
---|
1524 | }
|
---|
1525 |
|
---|
1526 |
|
---|
1527 | /**
|
---|
1528 | * Gets the current CR3 register value for the RC intermediate memory context.
|
---|
1529 | * @returns CR3 value.
|
---|
1530 | * @param pVM The VM handle.
|
---|
1531 | */
|
---|
1532 | VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM)
|
---|
1533 | {
|
---|
1534 | switch (pVM->pgm.s.enmShadowMode)
|
---|
1535 | {
|
---|
1536 | case PGMMODE_32_BIT:
|
---|
1537 | return pVM->pgm.s.HCPhysInterPD;
|
---|
1538 |
|
---|
1539 | case PGMMODE_PAE:
|
---|
1540 | case PGMMODE_PAE_NX:
|
---|
1541 | return pVM->pgm.s.HCPhysInterPaePDPT;
|
---|
1542 |
|
---|
1543 | case PGMMODE_AMD64:
|
---|
1544 | case PGMMODE_AMD64_NX:
|
---|
1545 | return pVM->pgm.s.HCPhysInterPaePML4;
|
---|
1546 |
|
---|
1547 | case PGMMODE_EPT:
|
---|
1548 | case PGMMODE_NESTED:
|
---|
1549 | return 0; /* not relevant */
|
---|
1550 |
|
---|
1551 | default:
|
---|
1552 | AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
|
---|
1553 | return ~0;
|
---|
1554 | }
|
---|
1555 | }
|
---|
1556 |
|
---|
1557 |
|
---|
1558 | /**
|
---|
1559 | * Gets the CR3 register value for the 32-Bit intermediate memory context.
|
---|
1560 | * @returns CR3 value.
|
---|
1561 | * @param pVM The VM handle.
|
---|
1562 | */
|
---|
1563 | VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
|
---|
1564 | {
|
---|
1565 | return pVM->pgm.s.HCPhysInterPD;
|
---|
1566 | }
|
---|
1567 |
|
---|
1568 |
|
---|
1569 | /**
|
---|
1570 | * Gets the CR3 register value for the PAE intermediate memory context.
|
---|
1571 | * @returns CR3 value.
|
---|
1572 | * @param pVM The VM handle.
|
---|
1573 | */
|
---|
1574 | VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
|
---|
1575 | {
|
---|
1576 | return pVM->pgm.s.HCPhysInterPaePDPT;
|
---|
1577 | }
|
---|
1578 |
|
---|
1579 |
|
---|
1580 | /**
|
---|
1581 | * Gets the CR3 register value for the AMD64 intermediate memory context.
|
---|
1582 | * @returns CR3 value.
|
---|
1583 | * @param pVM The VM handle.
|
---|
1584 | */
|
---|
1585 | VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
|
---|
1586 | {
|
---|
1587 | return pVM->pgm.s.HCPhysInterPaePML4;
|
---|
1588 | }
|
---|
1589 |
|
---|
1590 |
|
---|
1591 | /**
|
---|
1592 | * Performs and schedules necessary updates following a CR3 load or reload.
|
---|
1593 | *
|
---|
1594 | * This will normally involve mapping the guest PD or nPDPT.
|
---|
1595 | *
|
---|
1596 | * @returns VBox status code.
|
---|
1597 | * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
|
---|
1598 | * safely be ignored and overridden since the FF will be set as well.
|
---|
1599 | * @param pVM VM handle.
|
---|
1600 | * @param cr3 The new cr3.
|
---|
1601 | * @param fGlobal Indicates whether this is a global flush or not.
|
---|
1602 | */
|
---|
1603 | VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
|
---|
1604 | {
|
---|
1605 | STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
|
---|
1606 |
|
---|
1607 | /*
|
---|
1608 | * Always flag the necessary updates; necessary for hardware acceleration
|
---|
1609 | */
|
---|
1610 | /** @todo optimize this, it shouldn't always be necessary. */
|
---|
1611 | VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
|
---|
1612 | if (fGlobal)
|
---|
1613 | VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
|
---|
1614 | LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
|
---|
1615 |
|
---|
1616 | /*
|
---|
1617 | * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
|
---|
1618 | */
|
---|
1619 | int rc = VINF_SUCCESS;
|
---|
1620 | RTGCPHYS GCPhysCR3;
|
---|
1621 | if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
|
---|
1622 | || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
|
---|
1623 | || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
|
---|
1624 | || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
|
---|
1625 | GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
|
---|
1626 | else
|
---|
1627 | GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
|
---|
1628 | if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
|
---|
1629 | {
|
---|
1630 | #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
1631 | /* Unmap the old CR3 value before activating the new one. */
|
---|
1632 | rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
|
---|
1633 | AssertRC(rc);
|
---|
1634 | #endif
|
---|
1635 | RTGCPHYS GCPhysOldCR3 = pVM->pgm.s.GCPhysCR3;
|
---|
1636 | pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
|
---|
1637 | rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
|
---|
1638 | if (RT_LIKELY(rc == VINF_SUCCESS))
|
---|
1639 | {
|
---|
1640 | if (!pVM->pgm.s.fMappingsFixed)
|
---|
1641 | {
|
---|
1642 | pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
|
---|
1643 | #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
1644 | rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
|
---|
1645 | #endif
|
---|
1646 | }
|
---|
1647 | }
|
---|
1648 | else
|
---|
1649 | {
|
---|
1650 | AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
|
---|
1651 | Assert(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_PGM_SYNC_CR3));
|
---|
1652 | pVM->pgm.s.GCPhysCR3 = GCPhysOldCR3;
|
---|
1653 | pVM->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
|
---|
1654 | if (!pVM->pgm.s.fMappingsFixed)
|
---|
1655 | pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
|
---|
1656 | }
|
---|
1657 |
|
---|
1658 | if (fGlobal)
|
---|
1659 | STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
|
---|
1660 | else
|
---|
1661 | STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
|
---|
1662 | }
|
---|
1663 | else
|
---|
1664 | {
|
---|
1665 | /*
|
---|
1666 | * Check if we have a pending update of the CR3 monitoring.
|
---|
1667 | */
|
---|
1668 | if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
|
---|
1669 | {
|
---|
1670 | pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
|
---|
1671 | Assert(!pVM->pgm.s.fMappingsFixed);
|
---|
1672 | #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
1673 | rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
|
---|
1674 | #endif
|
---|
1675 | }
|
---|
1676 | if (fGlobal)
|
---|
1677 | STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
|
---|
1678 | else
|
---|
1679 | STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
|
---|
1680 | }
|
---|
1681 |
|
---|
1682 | STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
|
---|
1683 | return rc;
|
---|
1684 | }
|
---|
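/* Hypothetical call-site sketch (not from the original source): a MOV CR3
 * emulation would typically funnel into this function along these lines,
 * where uNewCr3 and uGuestCr4 are locals of the emulation code:
 *
 *     int rc = PGMFlushTLB(pVM, uNewCr3, !(uGuestCr4 & X86_CR4_PGE));
 *     AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
 *
 * VINF_PGM_SYNC_CR3 can be ignored at such a site because the force action
 * flag is set and PGMSyncCR3 will pick the work up later.
 */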
1685 |
|
---|
1686 |
|
---|
1687 | /**
|
---|
1688 | * Performs and schedules necessary updates following a CR3 load or reload when
|
---|
1689 | * using nested or extended paging.
|
---|
1690 | *
|
---|
1691 | * This API is an alternative to PGMFlushTLB that avoids actually flushing the
|
---|
1692 | * TLB and triggering a SyncCR3.
|
---|
1693 | *
|
---|
1694 | * This will normally involve mapping the guest PD or nPDPT.
|
---|
1695 | *
|
---|
1696 | * @returns VBox status code.
|
---|
1697 | * @retval VINF_SUCCESS.
|
---|
1698 | * @retval VINF_PGM_SYNC_CR3 (only when not in nested mode) if monitoring
 |
---|
1699 | * requires a CR3 sync. This can safely be ignored and overridden since
 |
---|
1700 | * the FF will be set as well.
|
---|
1701 | * @param pVM VM handle.
|
---|
1702 | * @param cr3 The new cr3.
|
---|
1703 | */
|
---|
1704 | VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
|
---|
1705 | {
|
---|
1706 | LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVM->pgm.s.GCPhysCR3));
|
---|
1707 |
|
---|
1708 | /* We assume we're only called in nested paging mode. */
|
---|
1709 | Assert(pVM->pgm.s.fMappingsFixed);
|
---|
1710 | Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
|
---|
1711 | Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
|
---|
1712 |
|
---|
1713 | /*
|
---|
1714 | * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
|
---|
1715 | */
|
---|
1716 | int rc = VINF_SUCCESS;
|
---|
1717 | RTGCPHYS GCPhysCR3;
|
---|
1718 | if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
|
---|
1719 | || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
|
---|
1720 | || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
|
---|
1721 | || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
|
---|
1722 | GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
|
---|
1723 | else
|
---|
1724 | GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
|
---|
1725 | if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
|
---|
1726 | {
|
---|
1727 | pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
|
---|
1728 | rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
|
---|
1729 | AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */
|
---|
1730 | }
|
---|
1731 | return rc;
|
---|
1732 | }
|
---|
1733 |
|
---|
1734 |
|
---|
1735 | /**
|
---|
1736 | * Synchronize the paging structures.
|
---|
1737 | *
|
---|
1738 | * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
|
---|
1739 | * VM_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
|
---|
1740 | * in several places, most importantly whenever the CR3 is loaded.
|
---|
1741 | *
|
---|
1742 | * @returns VBox status code.
|
---|
1743 | * @param pVM The virtual machine.
|
---|
1744 | * @param cr0 Guest context CR0 register
|
---|
1745 | * @param cr3 Guest context CR3 register
|
---|
1746 | * @param cr4 Guest context CR4 register
|
---|
1747 | * @param fGlobal Including global page directories or not
|
---|
1748 | */
|
---|
1749 | VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
|
---|
1750 | {
|
---|
1751 | int rc;
|
---|
1752 |
|
---|
1753 | /*
|
---|
1754 | * We might be called when we shouldn't.
|
---|
1755 | *
|
---|
1756 | * The mode switching will ensure that the PD is resynced
|
---|
1757 | * after every mode switch. So, if we find ourselves here
|
---|
1758 | * when in protected or real mode we can safely disable the
|
---|
1759 | * FF and return immediately.
|
---|
1760 | */
|
---|
1761 | if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
|
---|
1762 | {
|
---|
1763 | Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
|
---|
1764 | VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
|
---|
1765 | VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
|
---|
1766 | return VINF_SUCCESS;
|
---|
1767 | }
|
---|
1768 |
|
---|
1769 | /* If global pages are not supported, then all flushes are global. */
|
---|
1770 | if (!(cr4 & X86_CR4_PGE))
|
---|
1771 | fGlobal = true;
|
---|
1772 | LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
|
---|
1773 | VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
|
---|
1774 |
|
---|
1775 | #ifdef PGMPOOL_WITH_MONITORING
|
---|
1776 | /*
|
---|
1777 | * The pool may have pending stuff and even require a return to ring-3 to
|
---|
1778 | * clear the whole thing.
|
---|
1779 | */
|
---|
1780 | rc = pgmPoolSyncCR3(pVM);
|
---|
1781 | if (rc != VINF_SUCCESS)
|
---|
1782 | return rc;
|
---|
1783 | #endif
|
---|
1784 |
|
---|
1785 | /*
|
---|
1786 | * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
|
---|
1787 | * This should be done before SyncCR3.
|
---|
1788 | */
|
---|
1789 | if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
|
---|
1790 | {
|
---|
1791 | pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
|
---|
1792 |
|
---|
1793 | RTGCPHYS GCPhysCR3Old = pVM->pgm.s.GCPhysCR3;
|
---|
1794 | RTGCPHYS GCPhysCR3;
|
---|
1795 | if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
|
---|
1796 | || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
|
---|
1797 | || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
|
---|
1798 | || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
|
---|
1799 | GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
|
---|
1800 | else
|
---|
1801 | GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
|
---|
1802 |
|
---|
1803 | #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
1804 | if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
|
---|
1805 | {
|
---|
1806 | /* Unmap the old CR3 value before activating the new one. */
|
---|
1807 | rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
|
---|
1808 | AssertRC(rc);
|
---|
1809 | }
|
---|
1810 | #endif
|
---|
1811 |
|
---|
1812 | pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
|
---|
1813 | rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
|
---|
1814 | #ifdef IN_RING3
|
---|
1815 | if (rc == VINF_PGM_SYNC_CR3)
|
---|
1816 | rc = pgmPoolSyncCR3(pVM);
|
---|
1817 | #else
|
---|
1818 | if (rc == VINF_PGM_SYNC_CR3)
|
---|
1819 | {
|
---|
1820 | pVM->pgm.s.GCPhysCR3 = GCPhysCR3Old;
|
---|
1821 | return rc;
|
---|
1822 | }
|
---|
1823 | #endif
|
---|
1824 | AssertRCReturn(rc, rc);
|
---|
1825 | AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
|
---|
1826 | }
|
---|
1827 |
|
---|
1828 | /*
|
---|
1829 | * Let the 'Bth' function do the work and we'll just keep track of the flags.
|
---|
1830 | */
|
---|
1831 | STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
|
---|
1832 | rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
|
---|
1833 | STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
|
---|
1834 | AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
|
---|
1835 | if (rc == VINF_SUCCESS)
|
---|
1836 | {
|
---|
1837 | if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
|
---|
1838 | {
|
---|
1839 | VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
|
---|
1840 | VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
|
---|
1841 | }
|
---|
1842 |
|
---|
1843 | /*
|
---|
1844 | * Check if we have a pending update of the CR3 monitoring.
|
---|
1845 | */
|
---|
1846 | if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
|
---|
1847 | {
|
---|
1848 | pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
|
---|
1849 | Assert(!pVM->pgm.s.fMappingsFixed);
|
---|
1850 | #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
1851 | Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
|
---|
1852 | rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
|
---|
1853 | #endif
|
---|
1854 | }
|
---|
1855 | }
|
---|
1856 |
|
---|
1857 | /*
|
---|
1858 | * Now flush the CR3 (guest context).
|
---|
1859 | */
|
---|
1860 | if (rc == VINF_SUCCESS)
|
---|
1861 | PGM_INVL_GUEST_TLBS();
|
---|
1862 | return rc;
|
---|
1863 | }
|
---|
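/* Hypothetical sketch of the force-action driven call site (not from the
 * original source; cr0/cr3/cr4 stand for the current guest register values):
 *
 *     if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *     {
 *         int rc = PGMSyncCR3(pVM, cr0, cr3, cr4,
 *                             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
 *         if (RT_FAILURE(rc))
 *             return rc;
 *     }
 */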
1864 |
|
---|
1865 |
|
---|
1866 | /**
|
---|
1867 | * Called whenever CR0 or CR4 changes in a way which may affect
|
---|
1868 | * the paging mode.
|
---|
1869 | *
|
---|
1870 | * @returns VBox status code fit for scheduling in GC and R0.
|
---|
1871 | * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
|
---|
1872 | * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
|
---|
1873 | * @param pVM VM handle.
|
---|
1874 | * @param cr0 The new cr0.
|
---|
1875 | * @param cr4 The new cr4.
|
---|
1876 | * @param efer The new extended feature enable register.
|
---|
1877 | */
|
---|
1878 | VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
|
---|
1879 | {
|
---|
1880 | PGMMODE enmGuestMode;
|
---|
1881 |
|
---|
1882 | /*
|
---|
1883 | * Calc the new guest mode.
|
---|
1884 | */
|
---|
1885 | if (!(cr0 & X86_CR0_PE))
|
---|
1886 | enmGuestMode = PGMMODE_REAL;
|
---|
1887 | else if (!(cr0 & X86_CR0_PG))
|
---|
1888 | enmGuestMode = PGMMODE_PROTECTED;
|
---|
1889 | else if (!(cr4 & X86_CR4_PAE))
|
---|
1890 | enmGuestMode = PGMMODE_32_BIT;
|
---|
1891 | else if (!(efer & MSR_K6_EFER_LME))
|
---|
1892 | {
|
---|
1893 | if (!(efer & MSR_K6_EFER_NXE))
|
---|
1894 | enmGuestMode = PGMMODE_PAE;
|
---|
1895 | else
|
---|
1896 | enmGuestMode = PGMMODE_PAE_NX;
|
---|
1897 | }
|
---|
1898 | else
|
---|
1899 | {
|
---|
1900 | if (!(efer & MSR_K6_EFER_NXE))
|
---|
1901 | enmGuestMode = PGMMODE_AMD64;
|
---|
1902 | else
|
---|
1903 | enmGuestMode = PGMMODE_AMD64_NX;
|
---|
1904 | }
|
---|
1905 |
|
---|
1906 | /*
|
---|
1907 | * Did it change?
|
---|
1908 | */
|
---|
1909 | if (pVM->pgm.s.enmGuestMode == enmGuestMode)
|
---|
1910 | return VINF_SUCCESS;
|
---|
1911 |
|
---|
1912 | /* Flush the TLB */
|
---|
1913 | PGM_INVL_GUEST_TLBS();
|
---|
1914 |
|
---|
1915 | #ifdef IN_RING3
|
---|
1916 | return PGMR3ChangeMode(pVM, enmGuestMode);
|
---|
1917 | #else
|
---|
1918 | LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
|
---|
1919 | return VINF_PGM_CHANGE_MODE;
|
---|
1920 | #endif
|
---|
1921 | }
|
---|
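/* Hypothetical sketch of handling the return code (not from the original
 * source): in RC and R0 the mode switch cannot be performed in place, so the
 * status is propagated to reschedule into ring-3:
 *
 *     int rc = PGMChangeMode(pVM, cr0, cr4, efer);
 *     if (rc == VINF_PGM_CHANGE_MODE)
 *         return rc; // ring-3 then performs PGMR3ChangeMode
 */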
1922 |
|
---|
1923 |
|
---|
1924 | /**
|
---|
1925 | * Gets the current guest paging mode.
|
---|
1926 | *
|
---|
1927 | * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
|
---|
1928 | *
|
---|
1929 | * @returns The current paging mode.
|
---|
1930 | * @param pVM The VM handle.
|
---|
1931 | */
|
---|
1932 | VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
|
---|
1933 | {
|
---|
1934 | return pVM->pgm.s.enmGuestMode;
|
---|
1935 | }
|
---|
1936 |
|
---|
1937 |
|
---|
1938 | /**
|
---|
1939 | * Gets the current shadow paging mode.
|
---|
1940 | *
|
---|
1941 | * @returns The current paging mode.
|
---|
1942 | * @param pVM The VM handle.
|
---|
1943 | */
|
---|
1944 | VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
|
---|
1945 | {
|
---|
1946 | return pVM->pgm.s.enmShadowMode;
|
---|
1947 | }
|
---|
1948 |
|
---|
1949 | /**
|
---|
1950 | * Gets the current host paging mode.
|
---|
1951 | *
|
---|
1952 | * @returns The current paging mode.
|
---|
1953 | * @param pVM The VM handle.
|
---|
1954 | */
|
---|
1955 | VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
|
---|
1956 | {
|
---|
1957 | switch (pVM->pgm.s.enmHostMode)
|
---|
1958 | {
|
---|
1959 | case SUPPAGINGMODE_32_BIT:
|
---|
1960 | case SUPPAGINGMODE_32_BIT_GLOBAL:
|
---|
1961 | return PGMMODE_32_BIT;
|
---|
1962 |
|
---|
1963 | case SUPPAGINGMODE_PAE:
|
---|
1964 | case SUPPAGINGMODE_PAE_GLOBAL:
|
---|
1965 | return PGMMODE_PAE;
|
---|
1966 |
|
---|
1967 | case SUPPAGINGMODE_PAE_NX:
|
---|
1968 | case SUPPAGINGMODE_PAE_GLOBAL_NX:
|
---|
1969 | return PGMMODE_PAE_NX;
|
---|
1970 |
|
---|
1971 | case SUPPAGINGMODE_AMD64:
|
---|
1972 | case SUPPAGINGMODE_AMD64_GLOBAL:
|
---|
1973 | return PGMMODE_AMD64;
|
---|
1974 |
|
---|
1975 | case SUPPAGINGMODE_AMD64_NX:
|
---|
1976 | case SUPPAGINGMODE_AMD64_GLOBAL_NX:
|
---|
1977 | return PGMMODE_AMD64_NX;
|
---|
1978 |
|
---|
1979 | default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
|
---|
1980 | }
|
---|
1981 |
|
---|
1982 | return PGMMODE_INVALID;
|
---|
1983 | }
|
---|
1984 |
|
---|
1985 |
|
---|
1986 | /**
|
---|
1987 | * Get mode name.
|
---|
1988 | *
|
---|
1989 | * @returns read-only name string.
|
---|
1990 | * @param enmMode The mode which name is desired.
|
---|
1991 | */
|
---|
1992 | VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
|
---|
1993 | {
|
---|
1994 | switch (enmMode)
|
---|
1995 | {
|
---|
1996 | case PGMMODE_REAL: return "Real";
|
---|
1997 | case PGMMODE_PROTECTED: return "Protected";
|
---|
1998 | case PGMMODE_32_BIT: return "32-bit";
|
---|
1999 | case PGMMODE_PAE: return "PAE";
|
---|
2000 | case PGMMODE_PAE_NX: return "PAE+NX";
|
---|
2001 | case PGMMODE_AMD64: return "AMD64";
|
---|
2002 | case PGMMODE_AMD64_NX: return "AMD64+NX";
|
---|
2003 | case PGMMODE_NESTED: return "Nested";
|
---|
2004 | case PGMMODE_EPT: return "EPT";
|
---|
2005 | default: return "unknown mode value";
|
---|
2006 | }
|
---|
2007 | }
|
---|
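/* Usage sketch (illustrative only, not from the original source):
 *
 *     Log(("Guest paging mode: %s, shadow: %s\n",
 *          PGMGetModeName(PGMGetGuestMode(pVM)),
 *          PGMGetModeName(PGMGetShadowMode(pVM))));
 */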
2008 |
|
---|
2009 |
|
---|
2010 | /**
|
---|
2011 | * Acquire the PGM lock.
|
---|
2012 | *
|
---|
2013 | * @returns VBox status code
|
---|
2014 | * @param pVM The VM to operate on.
|
---|
2015 | */
|
---|
2016 | int pgmLock(PVM pVM)
|
---|
2017 | {
|
---|
2018 | int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
|
---|
2019 | #ifdef IN_RC
|
---|
2020 | if (rc == VERR_SEM_BUSY)
|
---|
2021 | rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
|
---|
2022 | #elif defined(IN_RING0)
|
---|
2023 | if (rc == VERR_SEM_BUSY)
|
---|
2024 | rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
|
---|
2025 | #endif
|
---|
2026 | AssertRC(rc);
|
---|
2027 | return rc;
|
---|
2028 | }
|
---|
2029 |
|
---|
2030 |
|
---|
2031 | /**
|
---|
2032 | * Release the PGM lock.
|
---|
2033 | *
|
---|
2035 | * @param pVM The VM to operate on.
|
---|
2036 | */
|
---|
2037 | void pgmUnlock(PVM pVM)
|
---|
2038 | {
|
---|
2039 | PDMCritSectLeave(&pVM->pgm.s.CritSect);
|
---|
2040 | }
|
---|
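/* Illustrative locking pattern (not from the original source): callers
 * bracket manipulation of shared PGM state like so:
 *
 *     int rc = pgmLock(pVM);
 *     AssertRCReturn(rc, rc);
 *     // ... touch shared PGM state ...
 *     pgmUnlock(pVM);
 */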
2041 |
|
---|
2042 | #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
|
---|
2043 |
|
---|
2044 | /**
|
---|
2045 | * Temporarily maps one guest page specified by GC physical address.
|
---|
2046 | * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
|
---|
2047 | *
|
---|
2048 | * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
|
---|
2049 | * reused after 8 mappings (or perhaps a few more if you score with the cache).
|
---|
2050 | *
|
---|
2051 | * @returns VBox status.
|
---|
2052 | * @param pVM VM handle.
|
---|
2053 | * @param GCPhys GC Physical address of the page.
|
---|
2054 | * @param ppv Where to store the address of the mapping.
|
---|
2055 | */
|
---|
2056 | VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
|
---|
2057 | {
|
---|
2058 | AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
|
---|
2059 |
|
---|
2060 | /*
|
---|
2061 | * Get the ram range.
|
---|
2062 | */
|
---|
2063 | PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
|
---|
2064 | while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
|
---|
2065 | pRam = pRam->CTX_SUFF(pNext);
|
---|
2066 | if (!pRam)
|
---|
2067 | {
|
---|
2068 | AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
|
---|
2069 | return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
|
---|
2070 | }
|
---|
2071 |
|
---|
2072 | /*
|
---|
2073 | * Pass it on to PGMDynMapHCPage.
|
---|
2074 | */
|
---|
2075 | RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
|
---|
2076 | //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
|
---|
2077 | #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
|
---|
2078 | pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
|
---|
2079 | #else
|
---|
2080 | PGMDynMapHCPage(pVM, HCPhys, ppv);
|
---|
2081 | #endif
|
---|
2082 | return VINF_SUCCESS;
|
---|
2083 | }
|
---|
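/* Hypothetical sketch (not from the original source): reading a dword
 * through the dynamic mapping area. The mapping may be recycled after a few
 * more mappings, so the data must be consumed immediately:
 *
 *     void    *pv;
 *     uint32_t uValue = 0;
 *     int rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pv);
 *     if (RT_SUCCESS(rc))
 *         uValue = *(uint32_t *)pv;
 */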
2084 |
|
---|
2085 |
|
---|
2086 | /**
|
---|
2087 | * Temporarily maps one guest page specified by unaligned GC physical address.
|
---|
2088 | * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
|
---|
2089 | *
|
---|
2090 | * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
|
---|
2091 | * reused after 8 mappings (or perhaps a few more if you score with the cache).
|
---|
2092 | *
|
---|
2093 | * The caller must be aware that only the specified page is mapped and that really bad things
|
---|
2094 | * will happen if writing beyond the page!
|
---|
2095 | *
|
---|
2096 | * @returns VBox status.
|
---|
2097 | * @param pVM VM handle.
|
---|
2098 | * @param GCPhys GC Physical address within the page to be mapped.
|
---|
2099 | * @param ppv Where to store the mapping address corresponding to GCPhys.
|
---|
2100 | */
|
---|
2101 | VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
|
---|
2102 | {
|
---|
2103 | /*
|
---|
2104 | * Get the ram range.
|
---|
2105 | */
|
---|
2106 | PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
|
---|
2107 | while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
|
---|
2108 | pRam = pRam->CTX_SUFF(pNext);
|
---|
2109 | if (!pRam)
|
---|
2110 | {
|
---|
2111 | AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
|
---|
2112 | return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
|
---|
2113 | }
|
---|
2114 |
|
---|
2115 | /*
|
---|
2116 | * Pass it on to PGMDynMapHCPage.
|
---|
2117 | */
|
---|
2118 | RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
|
---|
2119 | #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
|
---|
2120 | pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
|
---|
2121 | #else
|
---|
2122 | PGMDynMapHCPage(pVM, HCPhys, ppv);
|
---|
2123 | #endif
|
---|
2124 | *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
|
---|
2125 | return VINF_SUCCESS;
|
---|
2126 | }
|
---|
2127 |
|
---|
2128 |
|
---|
2129 | # ifdef IN_RC
|
---|
2130 | /**
|
---|
2131 | * Temporarily maps one host page specified by HC physical address.
|
---|
2132 | *
|
---|
2133 | * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
|
---|
2134 | * reused after 16 mappings (or perhaps a few more if you score with the cache).
|
---|
2135 | *
|
---|
2136 | * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
|
---|
2137 | * @param pVM VM handle.
|
---|
2138 | * @param HCPhys HC Physical address of the page.
|
---|
2139 | * @param ppv Where to store the address of the mapping. This is the
|
---|
2140 | * address of the PAGE not the exact address corresponding
|
---|
2141 | * to HCPhys. Use PGMDynMapHCPageOff if you care for the
|
---|
2142 | * page offset.
|
---|
2143 | */
|
---|
2144 | VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
|
---|
2145 | {
|
---|
2146 | AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
|
---|
2147 |
|
---|
2148 | /*
|
---|
2149 | * Check the cache.
|
---|
2150 | */
|
---|
2151 | register unsigned iCache;
|
---|
2152 | for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
|
---|
2153 | {
|
---|
2154 | static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
|
---|
2155 | {
|
---|
2156 | { 0, 9, 10, 11, 12, 13, 14, 15},
|
---|
2157 | { 0, 1, 10, 11, 12, 13, 14, 15},
|
---|
2158 | { 0, 1, 2, 11, 12, 13, 14, 15},
|
---|
2159 | { 0, 1, 2, 3, 12, 13, 14, 15},
|
---|
2160 | { 0, 1, 2, 3, 4, 13, 14, 15},
|
---|
2161 | { 0, 1, 2, 3, 4, 5, 14, 15},
|
---|
2162 | { 0, 1, 2, 3, 4, 5, 6, 15},
|
---|
2163 | { 0, 1, 2, 3, 4, 5, 6, 7},
|
---|
2164 | { 8, 1, 2, 3, 4, 5, 6, 7},
|
---|
2165 | { 8, 9, 2, 3, 4, 5, 6, 7},
|
---|
2166 | { 8, 9, 10, 3, 4, 5, 6, 7},
|
---|
2167 | { 8, 9, 10, 11, 4, 5, 6, 7},
|
---|
2168 | { 8, 9, 10, 11, 12, 5, 6, 7},
|
---|
2169 | { 8, 9, 10, 11, 12, 13, 6, 7},
|
---|
2170 | { 8, 9, 10, 11, 12, 13, 14, 7},
|
---|
2171 | { 8, 9, 10, 11, 12, 13, 14, 15},
|
---|
2172 | };
|
---|
2173 | AssertCompile(RT_ELEMENTS(au8Trans) == 16);
|
---|
2174 | AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
|
---|
2175 |
|
---|
2176 | if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
|
---|
2177 | {
|
---|
2178 | int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
|
---|
2179 | void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
|
---|
2180 | *ppv = pv;
|
---|
2181 | STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
|
---|
2182 | Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
|
---|
2183 | return VINF_SUCCESS;
|
---|
2184 | }
|
---|
2185 | }
|
---|
2186 | AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
|
---|
2187 | AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
|
---|
2188 | STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
|
---|
2189 |
|
---|
2190 | /*
|
---|
2191 | * Update the page tables.
|
---|
2192 | */
|
---|
2193 | register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
|
---|
2194 | # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
|
---|
2195 | unsigned i;
|
---|
2196 | for (i=0;i<(MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT);i++)
|
---|
2197 | {
|
---|
2198 | pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
|
---|
2199 | if (!(pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & PGM_PTFLAGS_DYN_LOCKED))
|
---|
2200 | break;
|
---|
2202 | }
|
---|
2203 | AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
|
---|
2204 | # else
|
---|
2205 | pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
|
---|
2206 | # endif
|
---|
2207 |
|
---|
2208 | pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
|
---|
2209 | pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
|
---|
2210 | pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
|
---|
2211 |
|
---|
2212 | void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
|
---|
2213 | *ppv = pv;
|
---|
2214 | ASMInvalidatePage(pv);
|
---|
2215 | Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
|
---|
2216 | return VINF_SUCCESS;
|
---|
2217 | }
|
---|
2218 |
|
---|
2219 | /**
|
---|
2220 | * Temporarily lock a dynamic page to prevent it from being reused.
|
---|
2221 | *
|
---|
2222 | * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
|
---|
2223 | * @param pVM VM handle.
|
---|
2224 | * @param GCPage GC address of page
|
---|
2225 | */
|
---|
2226 | VMMDECL(int) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
|
---|
2227 | {
|
---|
2228 | unsigned iPage;
|
---|
2229 |
|
---|
2230 | Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
|
---|
2231 | iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
|
---|
2232 | Assert(!(pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & PGM_PTFLAGS_DYN_LOCKED));
|
---|
2233 | pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u |= PGM_PTFLAGS_DYN_LOCKED;
|
---|
2234 | return VINF_SUCCESS;
|
---|
2235 | }
|
---|
2236 |
|
---|
2237 | /**
|
---|
2238 | * Unlock a dynamic page.
|
---|
2239 | *
|
---|
2240 | * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
|
---|
2241 | * @param pVM VM handle.
|
---|
2242 | * @param GCPage GC address of page
|
---|
2243 | */
|
---|
2244 | VMMDECL(int) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
|
---|
2245 | {
|
---|
2246 | unsigned iPage;
|
---|
2247 |
|
---|
2248 | Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
|
---|
2249 | iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
|
---|
2250 | Assert(pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & PGM_PTFLAGS_DYN_LOCKED);
|
---|
2251 | pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u &= ~PGM_PTFLAGS_DYN_LOCKED;
|
---|
2252 | return VINF_SUCCESS;
|
---|
2253 | }
|
---|
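/* Hypothetical pairing sketch (not from the original source): a lock pins a
 * dynamic mapping across operations that may themselves map further pages:
 *
 *     void *pv;
 *     int rc = PGMDynMapHCPage(pVM, HCPhys, &pv);
 *     if (RT_SUCCESS(rc))
 *     {
 *         PGMDynLockHCPage(pVM, (uint8_t *)pv);
 *         // ... code that may create additional dynamic mappings ...
 *         PGMDynUnlockHCPage(pVM, (uint8_t *)pv);
 *     }
 *
 * PGMDynCheckLocks below asserts that no such lock is leaked.
 */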
2254 |
|
---|
2255 | # ifdef VBOX_STRICT
|
---|
2256 | /**
|
---|
2257 | * Check for lock leaks.
|
---|
2258 | *
|
---|
2259 | * @param pVM VM handle.
|
---|
2260 | */
|
---|
2261 | VMMDECL(void) PGMDynCheckLocks(PVM pVM)
|
---|
2262 | {
|
---|
2263 | for (unsigned i=0;i<(MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT);i++)
|
---|
2264 | Assert(!(pVM->pgm.s.paDynPageMap32BitPTEsGC[i].u & PGM_PTFLAGS_DYN_LOCKED));
|
---|
2265 | }
|
---|
2266 | # endif
|
---|
2267 | # endif /* IN_RC */
|
---|
2268 |
|
---|
2269 | #endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
|
---|
2270 | #ifdef VBOX_STRICT
|
---|
2271 |
|
---|
2272 | /**
|
---|
2273 | * Asserts that there are no mapping conflicts.
|
---|
2274 | *
|
---|
2275 | * @returns Number of conflicts.
|
---|
2276 | * @param pVM The VM Handle.
|
---|
2277 | */
|
---|
2278 | VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
|
---|
2279 | {
|
---|
2280 | unsigned cErrors = 0;
|
---|
2281 |
|
---|
2282 | /*
|
---|
2283 | * Check for mapping conflicts.
|
---|
2284 | */
|
---|
2285 | for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
|
---|
2286 | pMapping;
|
---|
2287 | pMapping = pMapping->CTX_SUFF(pNext))
|
---|
2288 | {
|
---|
2289 | /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
|
---|
2290 | for (RTGCPTR GCPtr = pMapping->GCPtr;
|
---|
2291 | GCPtr <= pMapping->GCPtrLast;
|
---|
2292 | GCPtr += PAGE_SIZE)
|
---|
2293 | {
|
---|
2294 | int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
|
---|
2295 | if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
|
---|
2296 | {
|
---|
2297 | AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
|
---|
2298 | cErrors++;
|
---|
2299 | break;
|
---|
2300 | }
|
---|
2301 | }
|
---|
2302 | }
|
---|
2303 |
|
---|
2304 | return cErrors;
|
---|
2305 | }
|
---|
2306 |
|
---|
2307 |
|
---|
2308 | /**
|
---|
2309 | * Asserts that everything related to the guest CR3 is correctly shadowed.
|
---|
2310 | *
|
---|
2311 | * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
|
---|
2312 | * and assert the correctness of the guest CR3 mapping before asserting that the
|
---|
2313 | * shadow page tables are in sync with the guest page tables.
|
---|
2314 | *
|
---|
2315 | * @returns Number of conflicts.
|
---|
2316 | * @param pVM The VM Handle.
|
---|
2317 | * @param cr3 The current guest CR3 register value.
|
---|
2318 | * @param cr4 The current guest CR4 register value.
|
---|
2319 | */
|
---|
2320 | VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
|
---|
2321 | {
|
---|
2322 | STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
|
---|
2323 | unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCPTR)0);
|
---|
2324 | STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
|
---|
2325 | return cErrors;
|
---|
2326 | }
|
---|
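/* Hypothetical strict-build check (not from the original source; cr3 and cr4
 * are the current guest register values):
 *
 *     unsigned cErrors = PGMAssertCR3(pVM, cr3, cr4);
 *     AssertMsg(!cErrors, ("cErrors=%u\n", cErrors));
 */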
2327 |
|
---|
2328 | #endif /* VBOX_STRICT */
|
---|