Changeset 31402 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Aug 5, 2010 12:28:18 PM (15 years ago)
- svn:sync-xref-src-repo-rev: 64465
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 7 edited
Legend: unmodified context lines carry no prefix; added lines are prefixed with "+", removed lines with "-".
trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp (r28800 → r31402)

-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(IN_RC)

 /**
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r31170 → r31402)

     PGMPOOLKIND     enmKind;

-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
-# endif
-
     if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
     {
…
         */
        ASMReloadCR3();
-       PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
 # endif
+       PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
     }
     else
…
 #endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
 #if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-
 /**
  * Performs the lazy mapping of the 32-bit guest PD.
…
     return rc;
 }
-
 #endif
…
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)

-/** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
-DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
+/**
+ * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM handle.
+ * @param   pVCpu       The current CPU.
+ * @param   GCPhys      The guest physical address of the page to map.  The
+ *                      offset bits are not ignored.
+ * @param   ppv         Where to return the address corresponding to @a GCPhys.
+ */
+int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
 {
     pgmLock(pVM);

     /*
-     * Convert it to a writable page and it on to PGMDynMapHCPage.
+     * Convert it to a writable page and it on to the dynamic mapper.
      */
     int rc;
…
     if (RT_SUCCESS(rc))
     {
-        //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        rc = pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), PGM_PAGE_GET_HCPHYS(pPage), ppv);
-#else
-        rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
-#endif
+        void *pv;
+        rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
+        if (RT_SUCCESS(rc))
+            *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
     }
     else
…
 }

-
-/**
- * Temporarily maps one guest page specified by GC physical address.
- * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
- *
- * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
- * reused after 8 mappings (or perhaps a few more if you score with the cache).
- *
- * @returns VBox status.
- * @param   pVM     VM handle.
- * @param   GCPhys  GC Physical address of the page.
- * @param   ppv     Where to store the address of the mapping.
- */
-VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
-{
-    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
-    return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
-}
-
-
-/**
- * Temporarily maps one guest page specified by unaligned GC physical address.
- * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
- *
- * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
- * reused after 8 mappings (or perhaps a few more if you score with the cache).
- *
- * The caller is aware that only the speicifed page is mapped and that really bad things
- * will happen if writing beyond the page!
- *
- * @returns VBox status.
- * @param   pVM     VM handle.
- * @param   GCPhys  GC Physical address within the page to be mapped.
- * @param   ppv     Where to store the address of the mapping address corresponding to GCPhys.
- */
-VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
-{
-    void *pv;
-    int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
-    if (RT_SUCCESS(rc))
-    {
-        *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-        return VINF_SUCCESS;
-    }
-    return rc;
-}
-
-# ifdef IN_RC
-
-/**
- * Temporarily maps one host page specified by HC physical address.
- *
- * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
- * reused after 16 mappings (or perhaps a few more if you score with the cache).
- *
- * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
- * @param   pVM     VM handle.
- * @param   HCPhys  HC Physical address of the page.
- * @param   ppv     Where to store the address of the mapping.  This is the
- *                  address of the PAGE not the exact address corresponding
- *                  to HCPhys.  Use PGMDynMapHCPageOff if you care for the
- *                  page offset.
- */
-VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
-{
-    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
-
-    /*
-     * Check the cache.
-     */
-    register unsigned iCache;
-    for (iCache = 0; iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); iCache++)
-    {
-        static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
-        {
-            {  0,  9, 10, 11, 12, 13, 14, 15},
-            {  0,  1, 10, 11, 12, 13, 14, 15},
-            {  0,  1,  2, 11, 12, 13, 14, 15},
-            {  0,  1,  2,  3, 12, 13, 14, 15},
-            {  0,  1,  2,  3,  4, 13, 14, 15},
-            {  0,  1,  2,  3,  4,  5, 14, 15},
-            {  0,  1,  2,  3,  4,  5,  6, 15},
-            {  0,  1,  2,  3,  4,  5,  6,  7},
-            {  8,  1,  2,  3,  4,  5,  6,  7},
-            {  8,  9,  2,  3,  4,  5,  6,  7},
-            {  8,  9, 10,  3,  4,  5,  6,  7},
-            {  8,  9, 10, 11,  4,  5,  6,  7},
-            {  8,  9, 10, 11, 12,  5,  6,  7},
-            {  8,  9, 10, 11, 12, 13,  6,  7},
-            {  8,  9, 10, 11, 12, 13, 14,  7},
-            {  8,  9, 10, 11, 12, 13, 14, 15},
-        };
-        AssertCompile(RT_ELEMENTS(au8Trans) == 16);
-        AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
-
-        if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
-        {
-            int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
-
-            /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache
-               position, last = 11, lookup 2 -> page 10 instead of 2) */
-            if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
-            {
-                void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
-                *ppv = pv;
-                STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheHits);
-                Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
-                return VINF_SUCCESS;
-            }
-            LogFlow(("Out of sync entry %d\n", iPage));
-        }
-    }
-    AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
-    AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
-    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheMisses);
-
-    /*
-     * Update the page tables.
-     */
-    unsigned iPage = pVM->pgm.s.iDynPageMapLast;
-    unsigned i;
-    for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
-    {
-        pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
-        if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
-            break;
-        iPage++;
-    }
-    AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
-
-    pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
-    pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
-    pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u   = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
-    pVM->pgm.s.aLockedDynPageMapCache[iPage]    = 0;
-
-    void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
-    *ppv = pv;
-    ASMInvalidatePage(pv);
-    Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Temporarily lock a dynamic page to prevent it from being reused.
- *
- * @param   pVM     VM handle.
- * @param   GCPage  GC address of page
- */
-VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
-{
-    unsigned iPage;
-
-    Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
-    iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
-    ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
-    Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
-}
-
-
-/**
- * Unlock a dynamic page
- *
- * @param   pVM     VM handle.
- * @param   GCPage  GC address of page
- */
-VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
-{
-    unsigned iPage;
-
-    AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2 * RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
-    AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
-
-    Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
-    iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
-    Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
-    ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
-    Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
-}
-
-
-# ifdef VBOX_STRICT
-/**
- * Check for lock leaks.
- *
- * @param   pVM     VM handle.
- */
-VMMDECL(void) PGMDynCheckLocks(PVM pVM)
-{
-    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
-        Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
-}
-# endif /* VBOX_STRICT */
-
-# endif /* IN_RC */
-
 #endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
-
 #if !defined(IN_R0) || defined(LOG_ENABLED)
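The functions removed above pinned dynamic-mapping slots with an explicit reference count (PGMDynLockHCPage incremented aLockedDynPageMapCache, PGMDynUnlockHCPage decremented it), while the PGM_DYNMAP_UNUSED_HINT calls introduced throughout this changeset are purely advisory. The standalone sketch below contrasts the two schemes; it is not VirtualBox code, and the slot cache, names and sizes are invented for illustration.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SLOTS 16

    /* A toy dynamic-mapping cache: each slot maps one page. */
    static uint32_t g_acLocks[NUM_SLOTS];   /* old scheme: pin count per slot  */
    static int      g_afUnused[NUM_SLOTS];  /* new scheme: "done with it" hint */
    static unsigned g_iLast;                /* last slot handed out            */

    /* Old scheme: the caller pins the slot and must remember to unpin it. */
    static void slotLock(unsigned iSlot)   { g_acLocks[iSlot]++; }
    static void slotUnlock(unsigned iSlot) { assert(g_acLocks[iSlot] > 0); g_acLocks[iSlot]--; }

    /* New scheme: the caller merely hints that the slot may be recycled. */
    static void slotUnusedHint(unsigned iSlot) { g_afUnused[iSlot] = 1; }

    /* Allocation round-robins over the slots and must skip pinned ones;
       a smarter allocator could also prefer slots that were hinted unused. */
    static unsigned slotAlloc(void)
    {
        for (unsigned i = 0; i < NUM_SLOTS; i++)
        {
            unsigned iSlot = (g_iLast + 1 + i) % NUM_SLOTS;
            if (g_acLocks[iSlot] == 0)      /* cannot touch pinned slots */
            {
                g_iLast = iSlot;
                g_afUnused[iSlot] = 0;
                return iSlot;
            }
        }
        assert(!"out of dynamic mapping slots");
        return 0;
    }

    int main(void)
    {
        unsigned iSlot = slotAlloc();
        slotLock(iSlot);        /* old: pin while we dereference the mapping */
        /* ... use the mapping ... */
        slotUnlock(iSlot);      /* old: forgetting this leaks a pinned slot  */

        iSlot = slotAlloc();
        /* ... use the mapping ... */
        slotUnusedHint(iSlot);  /* new: purely advisory, nothing to leak     */

        printf("slot %u released\n", iSlot);
        return 0;
    }

The practical difference is that a forgotten unlock in the old scheme leaks a pinned slot for the rest of the session (hence the strict-build PGMDynCheckLocks leak check that is also deleted here), whereas a missing hint only costs the mapper a reuse opportunity.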
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r31207 → r31402)

     *pfLockTaken = false;

-# if defined(IN_RC) && defined(VBOX_STRICT)
-    PGMDynCheckLocks(pVM);
-# endif
-
 # if  (   PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \
        || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
…
     if (uErr & X86_TRAP_PF_RSVD)
     {
+        /** @todo This is not complete code.  take locks */
         Assert(uErr & X86_TRAP_PF_P);
         PPGMPAGE pPage;
…
             return VINF_SUCCESS;
         }
-#ifndef IN_RC
         AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
         AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
-#else
-        /* Ugly hack, proper fix is comming up later. */
-        if (   !(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u)
-            || !(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u) )
-        {
-            rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &GstWalk);
-            if (RT_FAILURE_NP(rc))
-                return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
-        }
-#endif
     }
…
     }

-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
-
     /*
      * Get the guest PD entry and calc big page.
…
                 LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage4MBPagesSkip));
-# if defined(IN_RC)
-                /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-                PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+                PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
                 return VINF_SUCCESS;
             }
…
         }
     }
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
     return rc;

…
     PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
     Assert(pShwPde);
-# endif
-
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
 # endif

…
         }
     }
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
     return VINF_SUCCESS;
 }
…
         ASMAtomicWriteSize(pPdeDst, 0);

-# if defined(IN_RC)
-        /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
         PGM_INVL_VCPU_TLBS(pVCpu);
         return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
…
     Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/

-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
-
     /*
      * Sync page directory entry.
…
         }
         ASMAtomicWriteSize(pPdeDst, PdeDst.u);
-# if defined(IN_RC)
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
         return VINF_SUCCESS;
     }
…
     {
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
-# if defined(IN_RC)
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
         return VINF_PGM_SYNC_CR3;
     }
…
                  | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
         ASMAtomicWriteSize(pPdeDst, PdeDst.u);
-# if defined(IN_RC)
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);

         /*
…

         /**
-         * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
+         * @todo It might be more efficient to sync only a part of the 4MB
+         *       page (similar to what we do for 4KB PDs).
          */

…
         }
         ASMAtomicWriteSize(pPdeDst, PdeDst.u);
-# if defined(IN_RC)
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);

         /*
…
 # endif

-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
-
     if (!pPdeDst->n.u1Present)
     {
…
         if (rc != VINF_SUCCESS)
         {
-# if defined(IN_RC)
-            /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
             pgmUnlock(pVM);
             AssertRC(rc);
…
         }
     }
-# if defined(IN_RC)
-    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
-    PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
-# endif
+    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
     pgmUnlock(pVM);
     return rc;
…
         AssertReturn(pPageCR3, VERR_INTERNAL_ERROR_2);
         HCPhysGuestCR3 = PGM_PAGE_GET_HCPHYS(pPageCR3);
-        /** @todo this needs some reworking wrt. locking .*/
+        /** @todo this needs some reworking wrt. locking? */
 # if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
         HCPtrGuestCR3 = NIL_RTHCPTR;
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp (r31174 → r31402)

                 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(pVCpu);
                 AssertFatal(pShw32BitPd);
-#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
-                PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
-#endif
+
                 /* Free any previous user, unless it's us. */
                 Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
…
                 pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                           | (uint32_t)pMap->aPTs[i].HCPhysPT;
-#ifdef IN_RC
-                /* Unlock dynamic mappings again. */
-                PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
-#endif
+                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShw32BitPd);
                 break;
…
                 PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);
                 Assert(pShwPdpt);
-#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
-                PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
-#endif

                 /*
…
                 }
                 Assert(pShwPaePd);
-#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
-                PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
-#endif

                 /*
…
                 pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;

-#ifdef IN_RC
-                /* Unlock dynamic mappings again. */
-                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
-                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
-#endif
+                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPaePd);
+                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPdpt);
                 break;
…
     if (    PGMGetGuestMode(pVCpu) >= PGMMODE_PAE
         &&  pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
-    {
         pCurrentShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);
-#ifdef IN_RC    /* Lock mapping to prevent it from being reused (currently not possible). */
-        if (pCurrentShwPdpt)
-            PGMDynLockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
-#endif
-    }

     unsigned i = pMap->cPTs;
…
         }
     }
-#ifdef IN_RC
-    /* Unlock dynamic mappings again. */
-    if (pCurrentShwPdpt)
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
-#endif
+
+    PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt);
 }
 #endif /* !IN_RING0 */
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r31208 → r31402)

     AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);

-#ifdef IN_RC
+#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     /*
      * Map it by HCPhys.
      */
-    return PGMDynMapHCPage(pVM, HCPhys, ppv);
-
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-    /*
-     * Map it by HCPhys.
-     */
-    return pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv);
+    return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);

 #else
…
             RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
             Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-            pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv);
-# else
-            PGMDynMapHCPage(pVM, HCPhys, ppv);
-# endif
+            pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
             return VINF_SUCCESS;
…
      */
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-    *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
+    *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS);
 #else
…
      */
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-    *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
+    *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
 #else
…
         if (RT_SUCCESS(rc))
         {
-            *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
+            *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
 # if 0
             pLock->pvMap = 0;
…
     else
     {
-        *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
+        *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
 # if 0
         pLock->pvMap = 0;
…
     pLock->u32Dummy = 0;

-#else   /* IN_RING3 */
+#else
     PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
     PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
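The pgmRZDynMapHCPageOff calls introduced above, like the new pgmRZDynMapGCPageCommon in PGMAll.cpp, rely on the mapper returning a page-aligned address and OR-ing the low offset bits of the physical address back in. Below is a minimal standalone illustration of that idiom; the names and the hard-coded 4 KiB page size are assumptions for the example, not VirtualBox definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define MY_PAGE_SIZE        4096u
    #define MY_PAGE_OFFSET_MASK (MY_PAGE_SIZE - 1)

    /* Combine a page-aligned mapping address with the low bits of the
       physical address it maps, yielding a pointer to the exact byte. */
    static void *mapWithOffset(void *pvPageMapping, uint64_t PhysAddr)
    {
        return (void *)((uintptr_t)pvPageMapping | (uintptr_t)(PhysAddr & MY_PAGE_OFFSET_MASK));
    }

    int main(void)
    {
        /* Pretend this page-aligned buffer is the dynamic mapping of the
           page containing physical address 0x12345678. */
        static _Alignas(MY_PAGE_SIZE) uint8_t abPage[MY_PAGE_SIZE];
        uint64_t PhysAddr = 0x12345678u;    /* offset 0x678 within its page */

        uint8_t *pb = (uint8_t *)mapWithOffset(abPage, PhysAddr);
        printf("offset into page: %u\n", (unsigned)(pb - abPage));  /* prints 1656 (0x678) */
        return 0;
    }

The OR only works because the mapping address is page aligned, which is why the example forces the buffer's alignment.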
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r31170 → r31402)

 }

-/** @def PGMPOOL_PAGE_2_LOCKED_PTR
- * Maps a pool page pool into the current context and lock it (RC only).
- *
- * @returns VBox status code.
- * @param   pVM     The VM handle.
- * @param   pPage   The pool page.
- *
- * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume of the
- *          small page window employeed by that function. Be careful.
- * @remark  There is no need to assert on the result.
- */
-#if defined(IN_RC)
-DECLINLINE(void *) PGMPOOL_PAGE_2_LOCKED_PTR(PVM pVM, PPGMPOOLPAGE pPage)
-{
-    void *pv = pgmPoolMapPageInlined(pVM, pPage);
-
-    /* Make sure the dynamic mapping will not be reused. */
-    if (pv)
-        PGMDynLockHCPage(pVM, (uint8_t *)pv);
-
-    return pv;
-}
-#else
-# define PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage)  PGMPOOL_PAGE_2_PTR(pVM, pPage)
-#endif
-
-/** @def PGMPOOL_UNLOCK_PTR
- * Unlock a previously locked dynamic caching (RC only).
- *
- * @returns VBox status code.
- * @param   pVM     The VM handle.
- * @param   pPage   The pool page.
- *
- * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume of the
- *          small page window employeed by that function. Be careful.
- * @remark  There is no need to assert on the result.
- */
-#if defined(IN_RC)
-DECLINLINE(void) PGMPOOL_UNLOCK_PTR(PVM pVM, void *pvPage)
-{
-    if (pvPage)
-        PGMDynUnlockHCPage(pVM, (uint8_t *)pvPage);
-}
-#else
-# define PGMPOOL_UNLOCK_PTR(pVM, pPage) do {} while (0)
-#endif
-

 /**
…
         {
             STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
             const unsigned iShw = off / sizeof(X86PTE);
             LogFlow(("PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT iShw=%x\n", iShw));
…
         {
             STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
             if (!((off ^ pPage->GCPhys) & (PAGE_SIZE / 2)))
             {
…
             unsigned iShwPdpt = iGst / 256;
             unsigned iShw     = (iGst % 256) * 2;
-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);

             LogFlow(("pgmPoolMonitorChainChanging PAE for 32 bits: iGst=%x iShw=%x idx = %d page idx=%d\n", iGst, iShw, iShwPdpt, pPage->enmKind - PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD));
…
         case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
         {
-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
             const unsigned iShw = off / sizeof(X86PTEPAE);
             STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
…
         case PGMPOOLKIND_32BIT_PD:
         {
-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
             const unsigned iShw = off / sizeof(X86PTE);         // ASSUMING 32-bit guest paging!
…
         case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
         {
-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
             const unsigned iShw = off / sizeof(X86PDEPAE);
             STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
…
             const unsigned offPdpt = GCPhysFault - pPage->GCPhys;

-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
             const unsigned iShw = offPdpt / sizeof(X86PDPE);
             if (iShw < X86_PG_PAE_PDPE_ENTRIES)          /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */
…
         {
             STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
             const unsigned iShw = off / sizeof(X86PDEPAE);
             Assert(!(uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING));
…
              * - messing with the bits of pd pointers without changing the physical address
              */
-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
             const unsigned iShw = off / sizeof(X86PDPE);
             if (uShw.pPDPT->a[iShw].n.u1Present)
…
              * - messing with the bits of pd pointers without changing the physical address
              */
-            uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
+            uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
             const unsigned iShw = off / sizeof(X86PDPE);
             if (uShw.pPML4->a[iShw].n.u1Present)
…
             AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
     }
-    PGMPOOL_UNLOCK_PTR(pVM, uShw.pv);
+    PGM_DYNMAP_UNUSED_HINT_VM(pVM, uShw.pv);

     /* next */
…
             while (pRegFrame->rcx)
             {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-                uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+                uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
                 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
-                PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
+                PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #else
                 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
…
      * Clear all the pages. ASSUMES that pvFault is readable.
      */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
     pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
-    PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
+    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #else
     pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
…
     if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
     {
-        void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+        void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
         void *pvGst;
         int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
…
         if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
         {
-            PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pTempPage);
+            PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pTempPage);

             for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
…
     pPage->fDirty = false;

-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    uint32_t iPrevSubset = PGMDynMapPushAutoSubset(VMMGetCpu(pVM));
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
+    PVMCPU   pVCpu = VMMGetCpu(pVM);
+    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
 #endif
…
     /* Flush those PTEs that have changed. */
     STAM_PROFILE_START(&pPool->StatTrackDeref,a);
-    void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
     void *pvGst;
     bool  fFlush;
…
     Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges));

-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PGMDynMapPopAutoSubset(VMMGetCpu(pVM), iPrevSubset);
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_R0) || defined(IN_RC)
+    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #endif
…
      * references to physical pages. (the HCPhys linear lookup is *extremely* expensive!)
      */
-    void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
     void *pvGst;
     int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
…
     else
     {
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
         /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
            pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
-        uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
+        uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
 # endif
…
         *pfFlushTLBs = true;

-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
+# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_R0) || defined(IN_RC)
+        PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 # endif
…
             AssertFatalMsgFailed(("enmKind=%d iUser=%#x iUserTable=%#x\n", pUserPage->enmKind, pUser->iUser, pUser->iUserTable));
     }
+    PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), u.pau64);

…
      * Map the shadow page and take action according to the page kind.
      */
-    void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
     switch (pPage->enmKind)
     {
…
     STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
     pPage->fZeroed = true;
-    PGMPOOL_UNLOCK_PTR(pPool->CTX_SUFF(pVM), pvShw);
+    PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), pvShw);
     Assert(!pPage->cPresent);
…
     }

-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     /* Start a subset so we won't run out of mapping space. */
     PVMCPU pVCpu = VMMGetCpu(pVM);
-    uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
+    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
 #endif
…
     pgmPoolCacheFlushPage(pPool, pPage);

-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_R0) || defined(IN_RC)
     /* Heavy stuff done. */
-    PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
+    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #endif
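The PGMRZDynMapPushAutoSubset/PGMRZDynMapPopAutoSubset pairs that this changeset extends to raw-mode (IN_RC) bracket code that may create many temporary mappings, so everything added in between can be released in one go afterwards. A rough standalone sketch of that watermark pattern follows; the structure and function names are illustrative only, not the VirtualBox implementation.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_AUTO_ENTRIES 32

    /* A toy "auto set": addresses collected during an operation and
       released in bulk afterwards. */
    typedef struct AUTOSET
    {
        uint32_t cEntries;
        void    *apvEntries[MAX_AUTO_ENTRIES];
    } AUTOSET;

    static void autoSetAdd(AUTOSET *pSet, void *pv)
    {
        assert(pSet->cEntries < MAX_AUTO_ENTRIES);
        pSet->apvEntries[pSet->cEntries++] = pv;
    }

    /* "Push": remember how many entries exist right now. */
    static uint32_t autoSetPushSubset(AUTOSET *pSet)
    {
        return pSet->cEntries;
    }

    /* "Pop": drop everything added after the push (a real mapper
       would also unmap those entries). */
    static void autoSetPopSubset(AUTOSET *pSet, uint32_t iPrevSubset)
    {
        assert(iPrevSubset <= pSet->cEntries);
        pSet->cEntries = iPrevSubset;
    }

    int main(void)
    {
        AUTOSET Set = { 0 };
        int a, b, c;

        autoSetAdd(&Set, &a);                        /* mapping owned by the caller */

        uint32_t iPrev = autoSetPushSubset(&Set);    /* bracket the heavy work      */
        autoSetAdd(&Set, &b);
        autoSetAdd(&Set, &c);
        autoSetPopSubset(&Set, iPrev);               /* temporary mappings are gone */

        printf("entries left: %u\n", Set.cEntries);  /* prints 1 */
        return 0;
    }

The TRPMAll.cpp hunk below uses the related PGMRZDynMapReleaseAutoSet to drop the whole set before control leaves for the guest trap handler, which does not return.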
trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp (r30493 → r31402)

             STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[iOrgTrap], o);

-            CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate], eflags.u32, ss_r0, (RTRCPTR)esp_r0);
+            PGMRZDynMapReleaseAutoSet(pVCpu);
+            CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate],
+                                       eflags.u32, ss_r0, (RTRCPTR)esp_r0);
             /* does not return */
 #else