Changeset 9570 in vbox for trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
- Timestamp: Jun 10, 2008 1:30:03 PM
- File: 1 edited
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
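PGMAllBth.h is the "both modes" template of the paging code: it carries no include guard and is compiled once per guest/shadow paging-mode pair, with PGM_GST_TYPE and PGM_SHW_TYPE defined by the includer, so every function in the diff below exists in one variant per mode pair. This changeset teaches those templates about AMD64 (long mode) guests. A minimal sketch of the multiple-inclusion pattern, assuming invented names (BTH_SUFFIX, BTH_GST_LEVELS and the pgmBth prefix are illustrative, not the real macro plumbing):

    /* bthtmpl.h -- deliberately no include guard; included once per mode pair. */
    #define BTH_CONCAT3_(a,b,c) a##b##c
    #define BTH_CONCAT3(a,b,c)  BTH_CONCAT3_(a,b,c)   /* expand args before pasting */
    #define BTH_NAME(name)      BTH_CONCAT3(pgmBth, BTH_SUFFIX, name)

    /* One instance of "the same" function is emitted per inclusion. */
    static int BTH_NAME(PagingLevels)(void)
    {
        return BTH_GST_LEVELS;  /* defined by the includer, like PGM_GST_TYPE */
    }

    #undef BTH_NAME
    #undef BTH_CONCAT3
    #undef BTH_CONCAT3_

    /* modes.c -- the includer stamps out one copy per mode pair. */
    #define BTH_SUFFIX     Gst32Shw32
    #define BTH_GST_LEVELS 2
    #include "bthtmpl.h"                /* emits pgmBthGst32Shw32PagingLevels() */
    #undef  BTH_SUFFIX
    #undef  BTH_GST_LEVELS

    #define BTH_SUFFIX     GstAmd64ShwAmd64
    #define BTH_GST_LEVELS 4
    #include "bthtmpl.h"                /* emits pgmBthGstAmd64ShwAmd64PagingLevels() */
    #undef  BTH_SUFFIX
    #undef  BTH_GST_LEVELS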
--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r9344)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r9570)

@@ -150,5 +150,5 @@ (Trap0eHandler)
     PX86PDPAE pPDDst;

-    rc = PGMShwGetLongModePDPtr(pVM, (RTGCUINTPTR)pvFault, &pPDDst);
+    rc = PGMShwGetAllocLongModePDPtr(pVM, (RTGCUINTPTR)pvFault, &pPDDst);
     if (rc != VINF_SUCCESS)
     {

@@ -1419,5 +1419,6 @@ (SyncPage)

 #if    (   PGM_GST_TYPE == PGM_TYPE_32BIT \
-        || PGM_GST_TYPE == PGM_TYPE_PAE) \
+        || PGM_GST_TYPE == PGM_TYPE_PAE \
+        || PGM_GST_TYPE == PGM_TYPE_AMD64) \
     && PGM_SHW_TYPE != PGM_TYPE_NESTED

@@ -1436,9 +1437,23 @@ (SyncPage)
      * Get the shadow PDE, find the shadow page table in the pool.
      */
+# if PGM_SHW_TYPE == PGM_TYPE_32BIT
     const unsigned  iPDDst = GCPtrPage >> SHW_PD_SHIFT;
-# if PGM_SHW_TYPE == PGM_TYPE_32BIT
     X86PDE          PdeDst = pVM->pgm.s.CTXMID(p,32BitPD)->a[iPDDst];
-# else /* PAE */
+# elif PGM_SHW_TYPE == PGM_TYPE_PAE
+    const unsigned  iPDDst = GCPtrPage >> SHW_PD_SHIFT;
     X86PDEPAE       PdeDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[iPDDst];
+# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+    const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
+    PX86PDPAE       pPDDst;
+    X86PDEPAE       PdeDst;
+
+    int rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
+    if (rc != VINF_SUCCESS)
+    {
+        AssertMsg(rc == VINF_PGM_SYNC_CR3, ("Unexpected rc=%Vrc\n", rc));
+        return rc;
+    }
+    Assert(pPDDst);
+    PdeDst = pPDDst->a[iPDDst];
 # endif
     Assert(PdeDst.n.u1Present);

@@ -1627,6 +1642,8 @@ (SyncPage)
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
         pVM->pgm.s.CTXMID(p,32BitPD)->a[iPDDst] = PdeDst;
-# else /* PAE */
+# elif PGM_SHW_TYPE == PGM_TYPE_PAE
         pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[iPDDst] = PdeDst;
+# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+        pPDDst->a[iPDDst] = PdeDst;
 # endif
         Log2(("SyncPage: BIG %VGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} GCPhys=%VGp%s\n",

@@ -1655,6 +1672,8 @@ (SyncPage)
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
         pVM->pgm.s.CTXMID(p,32BitPD)->a[iPDDst].u = 0;
-# else /* PAE */
+# elif PGM_SHW_TYPE == PGM_TYPE_PAE
         pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[iPDDst].u = 0;
+# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+        pPDDst->a[iPDDst].u = 0;
 # endif
         PGM_INVL_GUEST_TLBS();

@@ -1755,8 +1774,8 @@ (SyncPage)
     return VINF_SUCCESS;

-#else /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
+#else
     AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
     return VERR_INTERNAL_ERROR;
-#endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
+#endif
 }

@@ -2093,7 +2112,10 @@ (SyncPT)
     LogFlow(("SyncPT: GCPtrPage=%VGv\n", GCPtrPage));

-#if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
-    ||  PGM_GST_TYPE == PGM_TYPE_PAE) \
+#if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
+    ||  PGM_GST_TYPE == PGM_TYPE_PAE \
+    ||  PGM_GST_TYPE == PGM_TYPE_AMD64) \
     && PGM_SHW_TYPE != PGM_TYPE_NESTED
+
+    int rc = VINF_SUCCESS;

     /*
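A recurring detail in the AMD64 branches above and below: the shadow page-directory index is computed as (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK, where the 32-bit and PAE code can get away with a bare shift. In long mode the PD index is only one 9-bit slice of the virtual address, so the higher table indices must be masked off. A self-contained illustration, assuming the architectural x86-64 shift and mask values rather than quoting the real SHW_* macro definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* x86-64 4-level paging: each table level indexes 9 bits of the address. */
    #define PT_SHIFT    12      /* 4 KB pages              */
    #define PD_SHIFT    21      /* like SHW_PD_SHIFT here  */
    #define PDPT_SHIFT  30      /* like X86_PDPT_SHIFT     */
    #define PML4_SHIFT  39      /* like X86_PML4_SHIFT     */
    #define IDX_MASK    0x1ff   /* 9 bits, like SHW_PD_MASK */

    int main(void)
    {
        uint64_t GCPtrPage = 0x00007f12345f8000ULL;  /* arbitrary guest address */

        unsigned iPml4 = (unsigned)((GCPtrPage >> PML4_SHIFT) & IDX_MASK);
        unsigned iPdpt = (unsigned)((GCPtrPage >> PDPT_SHIFT) & IDX_MASK);
        unsigned iPd   = (unsigned)((GCPtrPage >> PD_SHIFT)   & IDX_MASK); /* the iPDDst above */
        unsigned iPt   = (unsigned)((GCPtrPage >> PT_SHIFT)   & IDX_MASK);

        /* Without & IDX_MASK the PD index would still contain the PDPT and
         * PML4 bits; below 4 GB with a single PD the bare shift happened to work. */
        printf("PML4=%u PDPT=%u PD=%u PT=%u\n", iPml4, iPdpt, iPd, iPt);
        return 0;
    }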
@@ -2102,9 +2124,20 @@ (SyncPT)
     AssertMsg(iPDSrc == ((GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK), ("iPDSrc=%x GCPtrPage=%VGv\n", iPDSrc, GCPtrPage));
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
+    const unsigned  iPDDst = GCPtrPage >> SHW_PD_SHIFT;
     PX86PD          pPDDst = pVM->pgm.s.CTXMID(p,32BitPD);
-# else
+# elif PGM_SHW_TYPE == PGM_TYPE_PAE
+    const unsigned  iPDDst = GCPtrPage >> SHW_PD_SHIFT;
     PX86PDPAE       pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
-# endif
-    const unsigned  iPDDst = GCPtrPage >> SHW_PD_SHIFT;
+# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+    const unsigned  iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
+    PX86PDPAE       pPDDst;
+    rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
+    if (rc != VINF_SUCCESS)
+    {
+        AssertMsg(rc == VINF_PGM_SYNC_CR3, ("Unexpected rc=%Vrc\n", rc));
+        return rc;
+    }
+    Assert(pPDDst);
+# endif
     PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
     SHWPDE  PdeDst = *pPdeDst;

@@ -2130,4 +2163,6 @@ (SyncPT)
 # elif PGM_GST_TYPE == PGM_TYPE_PAE
     int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
+# else
+    AssertFailed();     /* can't happen for amd64 */
 # endif
     if (VBOX_FAILURE(rc))

@@ -2147,5 +2182,4 @@ (SyncPT)
      * Sync page directory entry.
      */
-    int rc = VINF_SUCCESS;
     GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
     if (PdeSrc.n.u1Present)

@@ -2502,9 +2536,9 @@ (SyncPT)
     return rc;

-#else /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
+#else
     AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
     STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncPT), a);
     return VERR_INTERNAL_ERROR;
-#endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
+#endif
 }

@@ -2523,6 +2557,6 @@ (PrefetchPage)
 PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCUINTPTR GCPtrPage)
 {
-#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE) \
-    && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED
+#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
+    && PGM_SHW_TYPE != PGM_TYPE_NESTED
     /*
      * Check that all Guest levels thru the PDE are present, getting the

@@ -2534,7 +2568,14 @@ (PrefetchPage)
     const unsigned  iPDSrc = (RTGCUINTPTR)GCPtrPage >> GST_PD_SHIFT;
     PGSTPD          pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
-# else /* PAE */
+# elif PGM_GST_TYPE == PGM_TYPE_PAE
     unsigned        iPDSrc;
     PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);
+    if (!pPDSrc)
+        return VINF_SUCCESS; /* not present */
+# elif PGM_GST_TYPE == PGM_TYPE_AMD64
+    unsigned        iPDSrc;
+    PX86PML4E       pPml4e;
+    X86PDPE         Pdpe;
+    PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4e, &Pdpe, &iPDSrc);
     if (!pPDSrc)
         return VINF_SUCCESS; /* not present */

@@ -2557,6 +2598,20 @@ (PrefetchPage)
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
     const X86PDE    PdeDst = pVM->pgm.s.CTXMID(p,32BitPD)->a[GCPtrPage >> SHW_PD_SHIFT];
-# else
+# elif PGM_SHW_TYPE == PGM_TYPE_PAE
     const X86PDEPAE PdeDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[GCPtrPage >> SHW_PD_SHIFT];
+# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+    const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
+    PX86PDPAE       pPDDst;
+    PX86PDPT        pPdptDst;
+    X86PDEPAE       PdeDst;
+
+    int rc = PGMShwGetLongModePDPtr(pVM, GCPtrPage, &pPdptDst, &pPDDst);
+    if (rc != VINF_SUCCESS)
+    {
+        AssertMsg(rc == VERR_PAGE_TABLE_NOT_PRESENT, ("Unexpected rc=%Vrc\n", rc));
+        return rc;
+    }
+    Assert(pPDDst);
+    PdeDst = pPDDst->a[iPDDst];
 # endif
     if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
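Two shadow-walk flavours appear in the hunks above: PGMShwGetAllocLongModePDPtr, used where the caller is about to populate the page directory and missing intermediate tables should be allocated (its failure mode is VINF_PGM_SYNC_CR3, a queued full resync), and the read-only PGMShwGetLongModePDPtr used by PrefetchPage, which must not instantiate anything and reports VERR_PAGE_TABLE_NOT_PRESENT instead. A hedged sketch of that get vs. get-or-allocate split over a toy one-level directory table (types, names and status codes here are invented; only the calling convention mirrors the code above):

    #include <stdlib.h>

    typedef struct PageDir  { int present; } PageDir;
    typedef struct TopLevel { PageDir *apDirs[512]; } TopLevel;

    enum { OK = 0, ERR_NOT_PRESENT = -1, ERR_NO_MEMORY = -2 };

    /* Read-only walk: never allocates, like PGMShwGetLongModePDPtr's contract. */
    static int GetPD(TopLevel *pTop, unsigned idx, PageDir **ppPD)
    {
        if (!pTop->apDirs[idx])
            return ERR_NOT_PRESENT;   /* caller treats this like VERR_PAGE_TABLE_NOT_PRESENT */
        *ppPD = pTop->apDirs[idx];
        return OK;
    }

    /* Allocating walk: instantiates the missing level, like the ...Alloc... variant. */
    static int GetAllocPD(TopLevel *pTop, unsigned idx, PageDir **ppPD)
    {
        if (!pTop->apDirs[idx])
        {
            pTop->apDirs[idx] = (PageDir *)calloc(1, sizeof(PageDir));
            if (!pTop->apDirs[idx])
                return ERR_NO_MEMORY;
        }
        *ppPD = pTop->apDirs[idx];
        return OK;
    }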
@@ -2580,8 +2635,4 @@ (PrefetchPage)
 #elif PGM_SHW_TYPE == PGM_TYPE_NESTED
     return VINF_SUCCESS; /* ignore */
-#else /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
-
-    AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
-    return VERR_INTERNAL_ERROR;
-#endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
+#endif
 }

@@ -2603,6 +2654,6 @@ (VerifyAccess)

     Assert(!HWACCMIsNestedPagingActive(pVM));
-#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE) \
-    && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED
+#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_TYPE_AMD64) \
+    && PGM_SHW_TYPE != PGM_TYPE_NESTED

 # ifndef IN_RING0

@@ -2625,9 +2676,19 @@ (VerifyAccess)
     const unsigned  iPDSrc = (RTGCUINTPTR)GCPtrPage >> GST_PD_SHIFT;
     PGSTPD          pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
-# else /* PAE */
+# elif PGM_GST_TYPE == PGM_TYPE_PAE
     unsigned        iPDSrc;
     PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);

     if (pPDSrc)
+    {
+        Log(("PGMVerifyAccess: access violation for %VGv due to non-present PDPTR\n", GCPtrPage));
+        return VINF_EM_RAW_GUEST_TRAP;
+    }
+# elif PGM_GST_TYPE == PGM_TYPE_AMD64
+    unsigned        iPDSrc;
+    PX86PML4E       pPml4e;
+    X86PDPE         Pdpe;
+    PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtrPage, &pPml4e, &Pdpe, &iPDSrc);
+    if (!pPDSrc)
     {
         Log(("PGMVerifyAccess: access violation for %VGv due to non-present PDPTR\n", GCPtrPage));

@@ -2646,6 +2707,19 @@ (VerifyAccess)
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
     PX86PDE         pPdeDst = &pVM->pgm.s.CTXMID(p,32BitPD)->a[GCPtrPage >> SHW_PD_SHIFT];
-# else
+# elif PGM_SHW_TYPE == PGM_TYPE_PAE
     PX86PDEPAE      pPdeDst = &pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[GCPtrPage >> SHW_PD_SHIFT];
+# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+    const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
+    PX86PDPAE       pPDDst;
+    PX86PDEPAE      pPdeDst;
+
+    rc = PGMShwGetAllocLongModePDPtr(pVM, GCPtrPage, &pPDDst);
+    if (rc != VINF_SUCCESS)
+    {
+        AssertMsg(rc == VINF_PGM_SYNC_CR3, ("Unexpected rc=%Vrc\n", rc));
+        return rc;
+    }
+    Assert(pPDDst);
+    pPdeDst = &pPDDst->a[iPDDst];
 # endif
     if (!pPdeDst->n.u1Present)

@@ -2704,6 +2778,6 @@ (CalcPageKind)


-#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
-# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
+#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
+# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
 /**
  * Figures out which kind of shadow page this guest PDE warrants.

@@ -2805,5 +2879,5 @@ (SyncCR3)
     MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTXMID(Stat,SyncCR3Global) : &pVM->pgm.s.CTXMID(Stat,SyncCR3NotGlobal));

-# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
+# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
     /*
      * Get page directory addresses.
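On the guest side, pgmGstGetLongModePDPtr has to walk the guest's own PML4 and PDPT before it can hand back a page-directory pointer together with the PML4 entry, the PDPT entry and the PD index; a NULL return means some intermediate level is not present, which the callers above turn into "not present" or guest-trap paths. A simplified sketch of such a walk, assuming flat arrays of raw 64-bit entries with bit 0 as the present bit (the real code maps guest-physical pages instead of indexing arrays):

    #include <stdint.h>
    #include <stddef.h>

    #define ENTRIES      512
    #define PRESENT_BIT  0x1ULL
    #define PML4_SHIFT   39
    #define PDPT_SHIFT   30
    #define PD_SHIFT     21
    #define IDX_MASK     0x1ffULL

    /* Walks PML4 -> PDPT and returns the page directory, or NULL when a
     * level is not present; the same shape as pgmGstGetLongModePDPtr's contract. */
    static uint64_t *GstGetLongModePD(uint64_t *pPml4, uint64_t *apPdpt[ENTRIES],
                                      uint64_t *apPd[ENTRIES * ENTRIES],
                                      uint64_t GCPtr, unsigned *piPD)
    {
        unsigned iPml4 = (unsigned)((GCPtr >> PML4_SHIFT) & IDX_MASK);
        if (!(pPml4[iPml4] & PRESENT_BIT))
            return NULL;

        unsigned iPdpt = (unsigned)((GCPtr >> PDPT_SHIFT) & IDX_MASK);
        if (!(apPdpt[iPml4][iPdpt] & PRESENT_BIT))
            return NULL;

        *piPD = (unsigned)((GCPtr >> PD_SHIFT) & IDX_MASK);
        return apPd[iPml4 * ENTRIES + iPdpt];
    }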
@@ -2811,5 +2885,5 @@ (SyncCR3)
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
     PX86PDE     pPDEDst = &pVM->pgm.s.CTXMID(p,32BitPD)->a[0];
-# else /* PGM_SHW_TYPE == PGM_TYPE_PAE */
+# else /* PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64*/
 #  if PGM_GST_TYPE == PGM_TYPE_32BIT
     PX86PDEPAE  pPDEDst = &pVM->pgm.s.CTXMID(ap,PaePDs)[0]->a[0];
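The restructured SyncCR3 loop that follows wraps the existing PDPTE loop in a PML4E loop and re-derives a guest-virtual address from the two loop indices before asking the walkers for the corresponding page directory. Composing an address from indices is the inverse of the index extraction: shift each index back into place and combine with bitwise OR. A two-line sketch with the architectural shift values (note that a logical || in this position would collapse the whole expression to 0 or 1 instead of forming an address):

    #include <stdint.h>

    #define PML4_SHIFT 39   /* like X86_PML4_SHIFT */
    #define PDPT_SHIFT 30   /* like X86_PDPT_SHIFT */

    /* Lowest virtual address covered by the (iPML4E, iPDPTE) pair; each
     * PDPT entry spans 1 GB and each PML4 entry 512 GB. */
    static inline uint64_t AddrFromIndices(uint64_t iPML4E, uint64_t iPDPTE)
    {
        return (iPML4E << PML4_SHIFT) | (iPDPTE << PDPT_SHIFT);
    }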
@@ -2844,288 +2918,350 @@ (SyncCR3)
         iPdNoMapping = ~0U;
     }
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+    for (uint64_t iPML4E = 0; iPML4E < X86_PG_PAE_ENTRIES; iPML4E++)
+    {
+# else
+    {
+# endif
 # if PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
-    for (unsigned iPDPTE = 0; iPDPTE < GST_PDPE_ENTRIES; iPDPTE++)
-    {
-        unsigned iPDSrc;
-# if PGM_SHW_TYPE == PGM_TYPE_PAE
-        PX86PDPAE pPDPAE = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
+        for (uint64_t iPDPTE = 0; iPDPTE < GST_PDPE_ENTRIES; iPDPTE++)
+        {
+            unsigned    iPDSrc;
+#  if PGM_GST_TYPE == PGM_TYPE_PAE
+            PX86PDPAE   pPDPAE  = pVM->pgm.s.CTXMID(ap,PaePDs)[0];
+            PX86PDEPAE  pPDEDst = &pPDPAE->a[iPDPTE * X86_PG_PAE_ENTRIES];
+            PGSTPD      pPDSrc  = pgmGstGetPaePDPtr(&pVM->pgm.s, iPDPTE << X86_PDPT_SHIFT, &iPDSrc);
 # else
-        AssertFailed(); /* @todo */
-        PX86PDPE pPDPAE = pVM->pgm.s.CTXMID(ap,PaePDs)[iPDPTE * X86_PG_AMD64_ENTRIES];
+            PX86PML4E   pPml4eSrc;
+            X86PDPE     PdpeSrc;
+            PX86PDPT    pPdptDst;
+            PX86PDPAE   pPDDst;
+            PX86PDEPAE  pPDEDst;
+            RTGCUINTPTR GCPtr   = (iPML4E << X86_PML4_SHIFT) || (iPDPTE << X86_PDPT_SHIFT);
+            PGSTPD      pPDSrc  = pgmGstGetLongModePDPtr(&pVM->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);
+
+            int rc = PGMShwGetLongModePDPtr(pVM, GCPtr, &pPdptDst, &pPDDst);
+            if (rc != VINF_SUCCESS)
+            {
+                AssertMsg(rc == VERR_PAGE_TABLE_NOT_PRESENT, ("Unexpected rc=%Vrc\n", rc));
+                return rc;
+            }
+            Assert(pPDDst);
+            pPDEDst = &pPDDst->a[0];
+
+            if (!pPml4eSrc->n.u1Present)
+            {
+                /* Guest PML4 not present (anymore). */
+                if (pVM->pgm.s.CTXMID(p,PaePML4)->a[iPML4E].n.u1Present)
+                {
+                    /* Shadow PML4 present, so free all pdpt & pd entries. */
+                    for (iPDPTE = 0; iPDPTE < ELEMENTS(pPdptDst->a); iPDPTE++)
+                    {
+                        if (pPdptDst->a[iPDPTE].n.u1Present)
+                        {
+                            GCPtr = (iPML4E << X86_PML4_SHIFT) || (iPDPTE << X86_PDPT_SHIFT);
+
+                            rc = PGMShwGetLongModePDPtr(pVM, GCPtr, &pPdptDst, &pPDDst);
+                            if (rc != VINF_SUCCESS)
+                            {
+                                AssertMsg(rc == VERR_PAGE_TABLE_NOT_PRESENT, ("Unexpected rc=%Vrc\n", rc));
+                                return rc;
+                            }
+
+                            for (unsigned iPD = 0; iPD < ELEMENTS(pPDDst->a); iPD++)
+                            {
+                                if (    pPDDst->a[iPD].n.u1Present
+                                    &&  !(pPDDst->a[iPD].u & PGM_PDFLAGS_MAPPING))
+                                {
+                                    pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDDst->a[iPD].u & SHW_PDE_PG_MASK), PGMPOOL_IDX_PAE_PD, (iPML4E * X86_PG_PAE_ENTRIES + iPDPTE) * X86_PG_PAE_ENTRIES + iPD);
+                                    pPDDst->a[iPD].u = 0;
+                                }
+                            }
+
+                            pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPdptDst->a[iPDPTE].u & SHW_PDE_PG_MASK), PGMPOOL_IDX_PDPT, iPDPTE);
+                            pPdptDst->a[iPDPTE].u = 0;
+                        }
+                    }
+                    pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pVM->pgm.s.CTXMID(p,PaePML4)->a[iPML4E].u & SHW_PDE_PG_MASK), PGMPOOL_IDX_PML4, iPML4E);
+                    pVM->pgm.s.CTXMID(p,PaePML4)->a[iPML4E].n.u1Present = 0;
+                    break;
+                }
+            }
 # endif
+            Assert(iPDSrc == 0);
+
+            if (pPDSrc == NULL)
+            {
+                /* PDPE not present */
+                if (pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPDPTE].n.u1Present)
+                {
+                    /* for each page directory entry */
+                    for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++)
+                    {
+                        if (    pPDEDst[iPD].n.u1Present
+                            &&  !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
+                        {
+#  if PGM_GST_TYPE == PGM_TYPE_AMD64
+                            pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst[iPD].u & SHW_PDE_PG_MASK), PGMPOOL_IDX_PAE_PD, (iPML4E * X86_PG_PAE_ENTRIES + iPDPTE) * X86_PG_PAE_ENTRIES + iPD);
+#  else
+                            pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst[iPD].u & SHW_PDE_PG_MASK), PGMPOOL_IDX_PAE_PD, iPDPTE * X86_PG_PAE_ENTRIES + iPD);
+#  endif
+                            pPDEDst[iPD].u = 0;
+                        }
+                    }
+                }
+                if (!(pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPDPTE].u & PGM_PLXFLAGS_MAPPING))
+                    pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPDPTE].n.u1Present = 0;
+                continue;
+            }
-        PX86PDEPAE pPDEDst = &pPDPAE->a[iPDPTE * X86_PG_PAE_ENTRIES];
-        PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPDPTE << X86_PDPT_SHIFT, &iPDSrc);
-
-        if (pPDSrc == NULL)
-        {
-            /* PDPT not present */
-            if (pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPDPTE].n.u1Present)
-            {
-                for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++)
-                {
-                    if (    pPDEDst[iPD].n.u1Present
-                        &&  !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
-                    {
-                        pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst[iPD].u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPDPTE * X86_PG_PAE_ENTRIES + iPD);
-                        pPDEDst[iPD].u = 0;
-                    }
-                }
-            }
-            if (!(pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPDPTE].u & PGM_PLXFLAGS_MAPPING))
-                pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPDPTE].n.u1Present = 0;
-            continue;
-        }
 # else  /* PGM_GST_TYPE != PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_AMD64 */
-    {
+        {
 # endif /* PGM_GST_TYPE != PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_AMD64 */
             for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++)
             {
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
                 Assert(&pVM->pgm.s.CTXMID(p,32BitPD)->a[iPD] == pPDEDst);
 # elif PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                 AssertMsg(&pVM->pgm.s.CTXMID(ap,PaePDs)[iPD * 2 / 512]->a[iPD * 2 % 512] == pPDEDst, ("%p vs %p\n", &pVM->pgm.s.CTXMID(ap,PaePDs)[iPD * 2 / 512]->a[iPD * 2 % 512], pPDEDst));
 # endif
                 register GSTPDE PdeSrc = pPDSrc->a[iPD];
                 if (    PdeSrc.n.u1Present
                     &&  (PdeSrc.n.u1User || fRawR0Enabled))
                 {
 # if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
       ||  PGM_GST_TYPE == PGM_TYPE_PAE) \
     &&  !defined(PGM_WITHOUT_MAPPINGS)
                     /*
                      * Check for conflicts with GC mappings.
                      */
 #  if PGM_GST_TYPE == PGM_TYPE_PAE
                     if (iPD + iPDPTE * X86_PG_PAE_ENTRIES == iPdNoMapping)
 #  else
                     if (iPD == iPdNoMapping)
 #  endif
                     {
                         if (pVM->pgm.s.fMappingsFixed)
                         {
                             /* It's fixed, just skip the mapping. */
                             const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
                             iPD += cPTs - 1;
                             pPDEDst += cPTs + (PGM_GST_TYPE != PGM_SHW_TYPE) * cPTs;    /* Only applies to the pae shadow and 32 bits guest case */
                             pMapping = pMapping->CTXALLSUFF(pNext);
                             iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
                             continue;
                         }
 #  ifdef IN_RING3
 #   if PGM_GST_TYPE == PGM_TYPE_32BIT
                         int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
 #   elif PGM_GST_TYPE == PGM_TYPE_PAE
                         int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPDPTE << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
 #   endif
                         if (VBOX_FAILURE(rc))
                             return rc;

                         /*
                          * Update iPdNoMapping and pMapping.
                          */
                         pMapping = pVM->pgm.s.pMappingsR3;
                         while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
                             pMapping = pMapping->pNextR3;
                         iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
 #  else
                         LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
                         return VINF_PGM_SYNC_CR3;
 #  endif
                     }
 # else  /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
                     Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
 # endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
                     /*
                      * Sync page directory entry.
                      *
                      * The current approach is to allocated the page table but to set
                      * the entry to not-present and postpone the page table synching till
                      * it's actually used.
                      */
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                     for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
 # elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
                     const unsigned iPdShw = iPD + iPDPTE * X86_PG_PAE_ENTRIES; NOREF(iPdShw);
 # else
                     const unsigned iPdShw = iPD; NOREF(iPdShw);
 # endif
                     {
                         SHWPDE PdeDst = *pPDEDst;
                         if (PdeDst.n.u1Present)
                         {
                             PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
                             RTGCPHYS GCPhys;
                             if (    !PdeSrc.b.u1Size
                                 ||  !(cr4 & X86_CR4_PSE))
                             {
                                 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                                 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
                                 GCPhys |= i * (PAGE_SIZE / 2);
 # endif
                             }
                             else
                             {
                                 GCPhys = PdeSrc.u & GST_PDE_BIG_PG_MASK;
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                                 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
                                 GCPhys |= i * X86_PAGE_2M_SIZE;
 # endif
                             }

                             if (    pShwPage->GCPhys == GCPhys
                                 &&  pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)
                                 &&  (   pShwPage->fCached
                                      || (   !fGlobal
                                          && (   false
 # ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
                                             ||  (   (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
                                                 &&  (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */
                                             ||  (   !pShwPage->fSeenNonGlobal
                                                 &&  (cr4 & X86_CR4_PGE))
 # endif
                                             )
                                         )
                                     )
                                 &&  (   (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))
                                      || (   (cr4 & X86_CR4_PSE)
                                          &&     ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)
                                             ==  ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))
                                     )
                                )
                             {
 # ifdef VBOX_WITH_STATISTICS
                                 if (    !fGlobal
                                     &&  (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
                                     &&  (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))
                                     MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPD));
                                 else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))
                                     MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPT));
                                 else
                                     MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstCacheHit));
 # endif /* VBOX_WITH_STATISTICS */
                                 /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
                                  * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
 //# ifdef PGMPOOL_WITH_CACHE
 //                              pgmPoolCacheUsed(pPool, pShwPage);
 //# endif
                             }
                             else
                             {
                                 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);
                                 pPDEDst->u = 0;
                                 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreed));
                             }
                         }
                         else
                             MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstNotPresent));
                         pPDEDst++;
                     }
                 }
 #  if PGM_GST_TYPE == PGM_TYPE_PAE
                 else if (iPD + iPDPTE * X86_PG_PAE_ENTRIES != iPdNoMapping)
 #  else
                 else if (iPD != iPdNoMapping)
 #  endif
                 {
                     /*
                      * Check if there is any page directory to mark not present here.
                      */
 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                     for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
 # elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
                     const unsigned iPdShw = iPD + iPDPTE * X86_PG_PAE_ENTRIES; NOREF(iPdShw);
 # else
                     const unsigned iPdShw = iPD; NOREF(iPdShw);
 # endif
                     {
                         if (pPDEDst->n.u1Present)
                         {
                             pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst->u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdShw);
                             pPDEDst->u = 0;
                             MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreedSrcNP));
                         }
                         pPDEDst++;
                     }
                 }
                 else
                 {
 # if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
       ||  PGM_GST_TYPE == PGM_TYPE_PAE) \
     &&  !defined(PGM_WITHOUT_MAPPINGS)
                     const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;

                     Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
                     if (pVM->pgm.s.fMappingsFixed)
                     {
                         /* It's fixed, just skip the mapping. */
                         pMapping = pMapping->CTXALLSUFF(pNext);
                         iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
                     }
                     else
                     {
                         /*
                          * Check for conflicts for subsequent pagetables
                          * and advance to the next mapping.
                          */
                         iPdNoMapping = ~0U;
                         unsigned iPT = cPTs;
                         while (iPT-- > 1)
                         {
                             if (    pPDSrc->a[iPD + iPT].n.u1Present
                                 &&  (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))
                             {
 # ifdef IN_RING3
 #  if PGM_GST_TYPE == PGM_TYPE_32BIT
                                 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
 #  elif PGM_GST_TYPE == PGM_TYPE_PAE
                                 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPDPTE << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
 #  endif
                                 if (VBOX_FAILURE(rc))
                                     return rc;

                                 /*
                                  * Update iPdNoMapping and pMapping.
                                  */
                                 pMapping = pVM->pgm.s.CTXALLSUFF(pMappings);
                                 while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
                                     pMapping = pMapping->CTXALLSUFF(pNext);
                                 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
                                 break;
 # else
                                 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
                                 return VINF_PGM_SYNC_CR3;
 # endif
                             }
                         }
                         if (iPdNoMapping == ~0U && pMapping)
                         {
                             pMapping = pMapping->CTXALLSUFF(pNext);
                             if (pMapping)
                                 iPdNoMapping = pMapping->GCPtr >> GST_PD_SHIFT;
                         }
                     }

                     /* advance. */
                     iPD += cPTs - 1;
                     pPDEDst += cPTs + (PGM_GST_TYPE != PGM_SHW_TYPE) * cPTs;    /* Only applies to the pae shadow and 32 bits guest case */
 # if PGM_GST_TYPE != PGM_SHW_TYPE
                     AssertCompile(PGM_GST_TYPE == PGM_TYPE_32BIT && PGM_SHW_TYPE == PGM_TYPE_PAE);
 # endif
 # else  /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
                     Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
 # endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
                 }

             } /* for iPD */
         } /* for each PDPTE (PAE) */
+    } /* for each page map level 4 entry (amd64) */
     return VINF_SUCCESS;

-# elif PGM_GST_TYPE == PGM_TYPE_AMD64
-//# error not implemented
-    return VERR_INTERNAL_ERROR;
 # else  /* guest real and protected mode */
     return VINF_SUCCESS;
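The new PML4-not-present path in the SyncCR3 hunk tears the shadow hierarchy down top-down: free every present page directory the PDPT references, then the PDPT itself, then clear the PML4 entry, so no pool page is left orphaned. The same discipline in miniature, over heap-allocated toy tables (hypothetical structures; the pgmPoolFreeByPage bookkeeping is reduced to plain free()):

    #include <stdlib.h>

    #define ENTRIES 512

    typedef struct Pd   { void  *apPt[ENTRIES]; } Pd;
    typedef struct Pdpt { Pd    *apPd[ENTRIES]; } Pdpt;

    /* Free one PDPT subtree children-first, then the table itself, then let
     * the caller clear its referencing entry; the same order the SyncCR3
     * teardown above follows with pgmPoolFreeByPage. */
    static void FreePdptSubtree(Pdpt **ppPdpt)
    {
        Pdpt *pPdpt = *ppPdpt;
        if (!pPdpt)
            return;
        for (unsigned i = 0; i < ENTRIES; i++)
        {
            Pd *pPd = pPdpt->apPd[i];
            if (!pPd)
                continue;                   /* entry not present */
            for (unsigned j = 0; j < ENTRIES; j++)
                free(pPd->apPt[j]);         /* page tables, if any */
            free(pPd);
            pPdpt->apPd[i] = NULL;          /* mirrors 'pPdptDst->a[iPDPTE].u = 0' */
        }
        free(pPdpt);
        *ppPdpt = NULL;                     /* mirrors clearing the PML4 entry */
    }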