/* $Id: PGMInline.h 56626 2015-06-24 19:38:41Z vboxsync $ */
/** @file
 * PGM - Inlined functions.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___PGMInline_h
#define ___PGMInline_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/err.h>
#include <VBox/vmm/stam.h>
#include <VBox/param.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/dis.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/log.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/hm.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/sha.h>



/** @addtogroup grp_pgm_int   Internals
 * @internal
 * @{
 */

/**
 * Gets the PGMRAMRANGE structure for a guest page.
 *
 * @returns Pointer to the RAM range on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      The GC physical address.
 */
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVM pVM, RTGCPHYS GCPhys)
{
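    /* The RAM-range TLB is direct-mapped on address bits; the unsigned
       'GCPhys - pRam->GCPhys >= pRam->cb' check rejects addresses below the
       range (the subtraction wraps to a huge offset) as well as addresses
       beyond it in a single compare. */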
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
        return pgmPhysGetRangeSlow(pVM, GCPhys);
    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
    return pRam;
}


/**
 * Gets the PGMRAMRANGE structure for a guest page; if the address is
 * unassigned, returns the RAM range above it.
 *
 * @returns Pointer to the RAM range on success.
 * @returns NULL if the address is located after the last range.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      The GC physical address.
 */
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    if (   !pRam
        || (GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
    return pRam;
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      The GC physical address.
 */
DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetPageSlow(pVM, GCPhys);
    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
    return &pRam->aPages[off >> PAGE_SHIFT];
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 */
DECLINLINE(int) pgmPhysGetPageEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
    return VINF_SUCCESS;
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 * @param   ppRamHint   Where to read and store the ram list hint.
 *                      The caller initializes this to NULL before the call.
 */
DECLINLINE(int) pgmPhysGetPageWithHintEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
{
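    /* Try the caller's cached range first so tight loops that stay within a
       single RAM range can skip the TLB; refresh the hint on a miss. */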
    RTGCPHYS off;
    PPGMRAMRANGE pRam = *ppRamHint;
    if (    !pRam
        ||  RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
    {
        pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
        if (    !pRam
            ||  (off = GCPhys - pRam->GCPhys) >= pRam->cb)
            return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);

        STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
        *ppRamHint = pRam;
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}


/**
 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and valid *ppPage / *ppRam on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the pointer to the PGMPAGE structure.
 * @param   ppRam       Where to store the pointer to the PGMRAMRANGE structure.
 */
DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);

    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
    *ppRam = pRam;
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}


/**
 * Convert GC Phys to HC Phys.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   GCPhys  The GC physical address.
 * @param   pHCPhys Where to store the corresponding HC physical address.
 *
 * @deprecated  Doesn't deal with zero, shared or write monitored pages.
 *              Avoid when writing new code!
 */
DECLINLINE(int) pgmRamGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
{
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    if (RT_FAILURE(rc))
        return rc;
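    /* Combine the host page frame address with the low offset bits of GCPhys. */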
    *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
    return VINF_SUCCESS;
}

#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)

/**
 * Inlined version of the ring-0 version of the host page mapping code
 * that optimizes access to pages already in the set.
 *
 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   HCPhys      The physical address of the page.
 * @param   ppv         Where to store the mapping address.
 */
DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
{
    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;

    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

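    /* The auto-set doubles as a small hash table: aiHashTable[] points at a
       candidate entry for the hashed HCPhys.  On a hit we only bump the cheap
       inlined reference counter; otherwise fall back to the common worker. */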
    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
    unsigned    iEntry  = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
    {
        pSet->aEntries[iEntry].cInlinedRefs++;
        *ppv = pSet->aEntries[iEntry].pvPage;
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
    return VINF_SUCCESS;
}


/**
 * Inlined version of the guest page mapping code that optimizes access to pages
 * already in the set.
 *
 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPhys      The guest physical address of the page.
 * @param   ppv         Where to store the mapping address.
 */
DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));

    /*
     * Get the ram range.
     */
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb
        /** @todo || page state stuff */
       )
    {
        /* This case is not counted into StatRZDynMapGCPageInl. */
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    }

    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);

    /*
     * pgmRZDynMapHCPageInlined without the stats.
     */
    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
    unsigned    iEntry  = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
    {
        pSet->aEntries[iEntry].cInlinedRefs++;
        *ppv = pSet->aEntries[iEntry].pvPage;
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
    return VINF_SUCCESS;
}


/**
 * Inlined version of the ring-0 version of guest page mapping that optimizes
 * access to pages already in the set.
 *
 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPhys      The guest physical address of the page.
 * @param   ppv         Where to store the mapping address.
 */
DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
{
    return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
}


/**
 * Inlined version of the ring-0 version of the guest byte mapping code
 * that optimizes access to pages already in the set.
 *
 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPhys      The guest physical address of the page.
 * @param   ppv         Where to store the mapping address.  The offset is
 *                      preserved.
 */
DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);

    /*
     * Get the ram range.
     */
    PVM             pVM  = pVCpu->CTX_SUFF(pVM);
    PPGMRAMRANGE    pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS        off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb
        /** @todo || page state stuff */
       )
    {
        /* This case is not counted into StatRZDynMapGCPageInl. */
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    }

    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);

    /*
     * pgmRZDynMapHCPageInlined without the stats.
     */
    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
    unsigned    iEntry  = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
    {
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
        pSet->aEntries[iEntry].cInlinedRefs++;
        *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    }
    else
    {
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
        *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)

/**
 * Maps the page into current context (RC and maybe R0).
 *
 * @returns pointer to the mapping.
 * @param   pVM         Pointer to the PGM instance data.
 * @param   pPage       The page.
 */
DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
{
    if (pPage->idx >= PGMPOOL_IDX_FIRST)
    {
        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
        void *pv;
        pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
        return pv;
    }
    AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
}


/**
 * Maps the page into current context (RC and maybe R0).
 *
 * @returns pointer to the mapping.
 * @param   pVM         Pointer to the PGM instance data.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pPage       The page.
 */
DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
{
    if (pPage->idx >= PGMPOOL_IDX_FIRST)
    {
        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
        void *pv;
        Assert(pVCpu == VMMGetCpu(pVM));
        pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
        return pv;
    }
    AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
}

#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
#ifndef IN_RC

/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      The address of the guest page.
 * @param   ppTlbe      Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbe(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
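    /* The TLB entry is valid when it already holds the page-aligned address
       we are after; X86_PTE_PAE_PG_MASK strips the page-offset bits. */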
    PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
    }
    else
        rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}


/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pPage       Pointer to the PGMPAGE structure corresponding to
 *                      GCPhys.
 * @param   GCPhys      The address of the guest page.
 * @param   ppTlbe      Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
# if 0 //def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
#  ifdef IN_RING3
        if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR0)
#  else
        if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR3)
#  endif
            pTlbe->pv = pVM->pgm.s.CTX_SUFF(pvZeroPg);
# endif
        AssertPtr(pTlbe->pv);
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
# endif
    }
    else
        rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}

#endif /* !IN_RC */

/**
 * Enables write monitoring for an allocated page.
 *
 * The caller is responsible for updating the shadow page tables.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pPage       The page to write monitor.
 * @param   GCPhysPage  The address of the page.
 */
DECLINLINE(void) pgmPhysPageWriteMonitor(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
{
    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
    PGM_LOCK_ASSERT_OWNER(pVM);

    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
    pVM->pgm.s.cMonitoredPages++;

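    /* Write monitoring works on 4 KB granularity, so an active 2/4 MB (PDE
       type) mapping covering this page has to be demoted; the first page of
       the large page carries the state for the whole mapping. */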
    /* Large pages must be disabled. */
    if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
    {
        PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
        AssertFatal(pFirstPage);
        if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
        {
            PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
            pVM->pgm.s.cLargePagesDisabled++;
        }
        else
            Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
    }
}


/**
 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
 *
 * Only used when the guest is in PAE or long mode.  This is inlined so that we
 * can perform consistency checks in debug builds.
 *
 * @returns true if it is, false if it isn't.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPU pVCpu)
{
    Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
    Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
    return pVCpu->pgm.s.fNoExecuteEnabled;
}


/**
 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
 *
 * Only used when the guest is in paged 32-bit mode.  This is inlined so that
 * we can perform consistency checks in debug builds.
 *
 * @returns true if it is, false if it isn't.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPU pVCpu)
{
    Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
    Assert(!CPUMIsGuestInPAEMode(pVCpu));
    Assert(!CPUMIsGuestInLongMode(pVCpu));
    return pVCpu->pgm.s.fGst32BitPageSizeExtension;
}


/**
 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
 * Takes PSE-36 into account.
 *
 * @returns guest physical address
 * @param   pVM         Pointer to the VM.
 * @param   Pde         Guest Pde
 */
DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVM pVM, X86PDE Pde)
{
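    /* PSE-36: bits 13..20 of the PDE (u8PageNoHigh) supply physical address
       bits 32..39 of the 4 MB page; GCPhys4MBPSEMask then clamps the result
       to the physical width the guest CPU actually supports. */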
    RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
    GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;

    return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
}


/**
 * Gets the address of the guest page directory (32-bit paging).
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   ppPd        Where to return the mapping.  This is always set.
 */
DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPU pVCpu, PX86PD *ppPd)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
    if (RT_FAILURE(rc))
    {
        *ppPd = NULL;
        return rc;
    }
#else
    *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
    if (RT_UNLIKELY(!*ppPd))
        return pgmGstLazyMap32BitPD(pVCpu, ppPd);
#endif
    return VINF_SUCCESS;
}


/**
 * Gets the address of the guest page directory (32-bit paging).
 *
 * @returns Pointer to the page directory in question.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPU pVCpu)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PD pGuestPD = NULL;
    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
    if (RT_FAILURE(rc))
    {
        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
        return NULL;
    }
#else
    PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
    if (RT_UNLIKELY(!pGuestPD))
    {
        int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
        if (RT_FAILURE(rc))
            return NULL;
    }
#endif
    return pGuestPD;
}


/**
 * Gets the guest page directory pointer table.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   ppPdpt      Where to return the mapping.  This is always set.
 */
DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPU pVCpu, PX86PDPT *ppPdpt)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
    if (RT_FAILURE(rc))
    {
        *ppPdpt = NULL;
        return rc;
    }
#else
    *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
    if (RT_UNLIKELY(!*ppPdpt))
        return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
#endif
    return VINF_SUCCESS;
}


/**
 * Gets the guest page directory pointer table.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPU pVCpu)
{
    PX86PDPT pGuestPdpt;
    int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
    return pGuestPdpt;
}


/**
 * Gets the guest page directory pointer table entry for the specified address.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPT pGuestPDPT = NULL;
    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
    AssertRCReturn(rc, NULL);
#else
    PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
    if (RT_UNLIKELY(!pGuestPDPT))
    {
        int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
        if (RT_FAILURE(rc))
            return NULL;
    }
#endif
    return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pVCpu       The handle of the virtual CPU.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
    if (RT_LIKELY(pGuestPDPT))
    {
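        /* The PDPE must be present and must not have any must-be-zero bits
           set (fGstPaeMbzPdpeMask); real hardware would raise a reserved-bit
           #PF for such an entry. */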
        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
        if (    pGuestPDPT->a[iPdpt].n.u1Present
            &&  !(pGuestPDPT->a[iPdpt].u & pVCpu->pgm.s.fGstPaeMbzPdpeMask) )
        {
            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            PX86PDPAE   pGuestPD = NULL;
            int rc = pgmRZDynMapGCPageInlined(pVCpu,
                                              pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
                                              (void **)&pGuestPD
                                              RTLOG_COMMA_SRC_POS);
            if (RT_SUCCESS(rc))
                return pGuestPD->a[iPD];
            AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
#else
            PX86PDPAE   pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
            if (    !pGuestPD
                ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
                pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
            if (pGuestPD)
                return pGuestPD->a[iPD];
#endif
        }
    }

    X86PDEPAE ZeroPde = {0};
    return ZeroPde;
}


/**
 * Gets the page directory pointer table entry for the specified address
 * and returns the index into the page directory.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 * @param   piPD        Receives the index into the returned page directory.
 * @param   pPdpe       Receives the page directory pointer entry. Optional.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
{
    AssertGCPtr32(GCPtr);

    /* The PDPE. */
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
    if (RT_UNLIKELY(!pGuestPDPT))
        return NULL;
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pPdpe)
        *pPdpe = pGuestPDPT->a[iPdpt];
    if (!pGuestPDPT->a[iPdpt].n.u1Present)
        return NULL;
    if (RT_UNLIKELY(pVCpu->pgm.s.fGstPaeMbzPdpeMask & pGuestPDPT->a[iPdpt].u))
        return NULL;

    /* The PDE. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPAE   pGuestPD = NULL;
    int rc = pgmRZDynMapGCPageInlined(pVCpu,
                                      pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
                                      (void **)&pGuestPD
                                      RTLOG_COMMA_SRC_POS);
    if (RT_FAILURE(rc))
    {
        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
        return NULL;
    }
#else
    PX86PDPAE   pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
    if (    !pGuestPD
        ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
        pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
#endif

    *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    return pGuestPD;
}

#ifndef IN_RC

/**
 * Gets the page map level-4 pointer for the guest.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   ppPml4      Where to return the mapping.  Always set.
 */
DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPU pVCpu, PX86PML4 *ppPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
    if (RT_FAILURE(rc))
    {
        *ppPml4 = NULL;
        return rc;
    }
#else
    *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
    if (RT_UNLIKELY(!*ppPml4))
        return pgmGstLazyMapPml4(pVCpu, ppPml4);
#endif
    return VINF_SUCCESS;
}


/**
 * Gets the page map level-4 pointer for the guest.
 *
 * @returns Pointer to the PML4 page.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPU pVCpu)
{
    PX86PML4 pGuestPml4;
    int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
    return pGuestPml4;
}


/**
 * Gets the pointer to a page map level-4 entry.
 *
 * @returns Pointer to the PML4 entry.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   iPml4       The index.
 * @remarks Only used by AssertCR3.
 */
DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
    AssertRCReturn(rc, NULL);
#else
    PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
    if (RT_UNLIKELY(!pGuestPml4))
    {
        int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
        AssertRCReturn(rc, NULL);
    }
#endif
    return &pGuestPml4->a[iPml4];
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPU pVCpu, RTGCPTR64 GCPtr)
{
    /*
     * Note! To keep things simple, ASSUME invalid physical addresses will
     *       cause X86_TRAP_PF_RSVD.  This isn't a problem until we start
     *       supporting 52-bit wide physical guest addresses.
     */
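    /* Manual three-level walk (PML4E -> PDPTE -> PDE); any absent level or
       reserved-bit violation yields a zero (not-present) PDE instead. */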
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (    RT_LIKELY(pGuestPml4)
        &&  pGuestPml4->a[iPml4].n.u1Present
        &&  !(pGuestPml4->a[iPml4].u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask) )
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        if (RT_SUCCESS(rc))
        {
            const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
            if (    pPdptTemp->a[iPdpt].n.u1Present
                &&  !(pPdptTemp->a[iPdpt].u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask) )
            {
                PCX86PDPAE pPD;
                rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
                if (RT_SUCCESS(rc))
                {
                    const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
                    return pPD->a[iPD];
                }
            }
        }
        AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
    }

    X86PDEPAE ZeroPde = {0};
    return ZeroPde;
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns The page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out)
 * @param   pPdpe       Page directory pointer table entry (out)
 * @param   piPD        Receives the index into the returned page directory
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
{
    /* The PML4E. */
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
    if (RT_UNLIKELY(!pGuestPml4))
        return NULL;
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (!pPml4e->n.u1Present)
        return NULL;
    if (RT_UNLIKELY(pPml4e->u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask))
        return NULL;

    /* The PDPE. */
    PCX86PDPT pPdptTemp;
    int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
    if (RT_FAILURE(rc))
    {
        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
        return NULL;
    }
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    *pPdpe = pPdptTemp->a[iPdpt];
    if (!pPdpe->n.u1Present)
        return NULL;
    if (RT_UNLIKELY(pPdpe->u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask))
        return NULL;

    /* The PDE. */
    PX86PDPAE pPD;
    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    if (RT_FAILURE(rc))
    {
        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
        return NULL;
    }

    *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    return pPD;
}

#endif /* !IN_RC */

/**
 * Gets the shadow page directory, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PD.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPU pVCpu)
{
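    /* pShwPageCR3 is the pool page backing the shadow paging root; in 32-bit
       shadow mode its payload is the page directory itself. */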
    return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory entry for the specified address, 32-bit.
 *
 * @returns Shadow 32-bit PDE.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
    if (!pShwPde)
    {
        X86PDE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for the specified
 * address, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PDE.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}


/**
 * Gets the shadow page directory pointer table, PAE.
 *
 * @returns Pointer to the shadow PAE PDPT.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPU pVCpu)
{
    return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT       pPdpt = pgmShwGetPaePDPTPtr(pVCpu);

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pPdpt       Pointer to the shadow PAE PDPT.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
{
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
}


/**
 * Gets the shadow page directory entry, PAE.
 *
 * @returns PDE.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
    if (!pShwPde)
    {
        X86PDEPAE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns Pointer to the PDE.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 * @remarks Only used by AssertCR3.
 */
DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}

#ifndef IN_RC

/**
 * Gets the shadow page map level-4 pointer.
 *
 * @returns Pointer to the shadow PML4.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPU pVCpu)
{
    return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page map level-4 entry for the specified address.
 *
 * @returns The entry.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    const unsigned  iPml4    = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PX86PML4        pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);

    if (!pShwPml4)
    {
        X86PML4E ZeroPml4e = {0};
        return ZeroPml4e;
    }
    return pShwPml4->a[iPml4];
}


/**
 * Gets the pointer to the specified shadow page map level-4 entry.
 *
 * @returns The entry.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   iPml4       The PML4 index.
 */
DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
{
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
    if (!pShwPml4)
        return NULL;
    return &pShwPml4->a[iPml4];
}

#endif /* !IN_RC */

/**
 * Cached physical handler lookup.
 *
 * @returns Physical handler covering @a GCPhys.
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      The lookup address.
 */
DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVM pVM, RTGCPHYS GCPhys)
{
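    /* One-entry cache: check the most recently used handler before falling
       back to the AVL tree lookup.  Core.KeyLast is the last covered byte,
       i.e. the range is inclusive at both ends. */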
    PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
    if (    pHandler
        &&  GCPhys >= pHandler->Core.Key
        &&  GCPhys <= pHandler->Core.KeyLast)
    {
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupHits));
        return pHandler;
    }

    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupMisses));
    pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (pHandler)
        pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
    return pHandler;
}


#ifdef VBOX_WITH_RAW_MODE
/**
 * Clears one physical page of a virtual handler.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCur        Virtual handler structure.
 * @param   iPage       Physical page index.
 *
 * @remark  Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
 *          need to care about other handlers in the same page.
 */
DECLINLINE(void) pgmHandlerVirtualClearPage(PVM pVM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];

    /*
     * Remove the node from the tree (it's supposed to be in the tree if we get here!).
     */
# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                     ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                      pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
# endif
    if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
    {
        /* We're the head of the alias chain. */
        PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pRemove != NULL,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
        AssertReleaseMsg(pRemove == pPhys2Virt,
                         ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
                          "   got:    pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
                          pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
# endif
        if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
        {
            /* Insert the next list in the alias chain into the tree. */
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
            AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                             ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                              pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
# endif
            pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
            bool fRc = RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
            AssertRelease(fRc);
        }
    }
    else
    {
        /* Locate the previous node in the alias chain. */
        PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pPrev != pPhys2Virt,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
# endif
        for (;;)
        {
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
            if (pNext == pPhys2Virt)
            {
                /* unlink. */
                LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
                         pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
                if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
                    pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
                else
                {
                    PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                    pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
                                        | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
                }
                break;
            }

            /* next */
            if (pNext == pPrev)
            {
# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertReleaseMsg(pNext != pPrev,
                                 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                                  pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
# endif
                break;
            }
            pPrev = pNext;
        }
    }
    Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
          pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
    pPhys2Virt->offNextAlias = 0;
    pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */

    /*
     * Clear the ram flags for this page.
     */
    PPGMPAGE pPage = pgmPhysGetPage(pVM, pPhys2Virt->Core.Key);
    AssertReturnVoid(pPage);
    PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
}
#endif /* VBOX_WITH_RAW_MODE */


/**
 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
 *
 * @returns Pointer to the shadow page structure.
 * @param   pPool       The pool.
 * @param   idx         The pool page index.
 */
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
{
    AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
    return &pPool->aPages[idx];
}


/**
 * Clear references to guest physical memory.
 *
 * @param   pPool       The pool.
 * @param   pPoolPage   The pool page.
 * @param   pPhysPage   The physical guest page tracking structure.
 * @param   iPte        Shadow PTE index
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
{
    /*
     * Just deal with the simple case here.
     */
#ifdef VBOX_STRICT
    PVM pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
#endif
#ifdef LOG_ENABLED
    const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
#endif
    const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
    if (cRefs == 1)
    {
        Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
        Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
        /* Invalidate the tracking data. */
        PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
    }
    else
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
    Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
}


/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another.
 *
 * @param   pPool       The pool.
 * @param   pPage       The cached page.
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));

    /*
     * Move to the head of the age list.
     */
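    /* iAgePrev == NIL_PGMPOOL_IDX means the page already heads the LRU list,
       in which case there is nothing to do. */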
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
}


/**
 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
 *
 * @param   pPool       The pool.
 * @param   pPage       PGM pool page
 */
DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
    ASMAtomicIncU32(&pPage->cLocked);
}


/**
 * Unlocks a page to allow flushing again.
 *
 * @param   pPool       The pool.
 * @param   pPage       PGM pool page
 */
DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
    Assert(pPage->cLocked);
    ASMAtomicDecU32(&pPage->cLocked);
}


/**
 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
 *
 * @returns true if locked, false if not.
 * @param   pPage       PGM pool page
 */
DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
{
    if (pPage->cLocked)
    {
        LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
        if (pPage->cModifications)
            pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
        return true;
    }
    return false;
}


/**
 * Tells if mappings are to be put into the shadow page table or not.
 *
 * @returns boolean result
 * @param   pVM         Pointer to the VM.
 */
DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVM pVM)
{
#ifdef PGM_WITHOUT_MAPPINGS
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(HMIsEnabled(pVM));
    return false;
#else
    Assert(pVM->cCpus == 1 || HMIsEnabled(pVM));
    return !HMIsEnabled(pVM);
#endif
}


/**
 * Checks if the mappings are floating and enabled.
 *
 * @returns true / false.
 * @param   pVM         Pointer to the VM.
 */
DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVM pVM)
{
#ifdef PGM_WITHOUT_MAPPINGS
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(HMIsEnabled(pVM));
    return false;
#else
    return !pVM->pgm.s.fMappingsFixed
        && pgmMapAreMappingsEnabled(pVM);
#endif
}

/** @} */

#endif /* !___PGMInline_h */