Changeset 18665 in vbox for trunk/src/VBox/VMM/PGMPhys.cpp
- Timestamp: Apr 2, 2009 7:44:18 PM
- File: 1 edited (trunk/src/VBox/VMM/PGMPhys.cpp)
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
--- trunk/src/VBox/VMM/PGMPhys.cpp (revision 18645)
+++ trunk/src/VBox/VMM/PGMPhys.cpp (revision 18665)

…
-#ifdef VBOX_WITH_NEW_PHYS_CODE
 /**
  * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
…
     return rc;
 }
-#endif /* VBOX_WITH_NEW_PHYS_CODE */
…
     AssertPtr(pLock);
 
-#ifdef VBOX_WITH_NEW_PHYS_CODE
     int rc = pgmLock(pVM);
     AssertRCReturn(rc, rc);
…
     pgmUnlock(pVM);
     return rc;
-
-#else /* !VBOX_WITH_NEW_PHYS_CODE */
-    /*
-     * Fallback code.
-     */
-    return PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1, (PRTR3PTR)ppv);
-#endif /* !VBOX_WITH_NEW_PHYS_CODE */
 }
…
 VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
 {
-#ifdef VBOX_WITH_NEW_PHYS_CODE
     int rc = pgmLock(pVM);
     AssertRCReturn(rc, rc);
…
     pgmUnlock(pVM);
     return rc;
-
-#else /* !VBOX_WITH_NEW_PHYS_CODE */
-    /*
-     * Fallback code.
-     */
-    return PGMPhysGCPhys2CCPtr(pVM, GCPhys, (void **)ppv, pLock);
-#endif /* !VBOX_WITH_NEW_PHYS_CODE */
 }
…
 
-#ifdef VBOX_WITH_NEW_PHYS_CODE
 /**
  * Frees a range of pages, replacing them with ZERO pages of the specified type.
…
     return rc;
 }
-#endif /* VBOX_WITH_NEW_PHYS_CODE */
…
         return rc;
 
-#ifdef VBOX_WITH_NEW_PHYS_CODE
     if (    GCPhys >= _4G
         &&  cPages > 256)
…
     }
     else
-#endif
     {
         /*
…
         AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
 
-#ifndef VBOX_WITH_NEW_PHYS_CODE
-        /* Allocate memory for chunk to HC ptr lookup array. */
-        pNew->paChunkR3Ptrs = NULL;
-        rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
-        AssertRCReturn(rc, rc);
-        pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
-#endif
-
         pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
     }
…
      * Notify REM.
      */
-#ifdef VBOX_WITH_NEW_PHYS_CODE
     REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
-#else
-    REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
-#endif
 
     return VINF_SUCCESS;
…
 int pgmR3PhysRamReset(PVM pVM)
 {
-#ifdef VBOX_WITH_NEW_PHYS_CODE
     /*
      * We batch up pages before freeing them.
…
     int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
     AssertLogRelRCReturn(rc, rc);
-#endif
…
         AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
 
-#ifdef VBOX_WITH_NEW_PHYS_CODE
         if (!pVM->pgm.s.fRamPreAlloc)
         {
…
         }
         else
-#endif
         {
             /* Zero the memory. */
…
                 switch (PGM_PAGE_GET_TYPE(pPage))
                 {
-#ifndef VBOX_WITH_NEW_PHYS_CODE
-                    case PGMPAGETYPE_INVALID:
-                    case PGMPAGETYPE_RAM:
-                        if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
-                        {
-                            /* shadow ram is reloaded elsewhere. */
-                            Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
-                            continue;
-                        }
-                        if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
-                        {
-                            unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
-                            if (pRam->paChunkR3Ptrs[iChunk])
-                                ASMMemZero32((char *)pRam->paChunkR3Ptrs[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
-                        }
-                        else
-                            ASMMemZero32((char *)pRam->pvR3 + (iPage << PAGE_SHIFT), PAGE_SIZE);
-                        break;
-#else /* VBOX_WITH_NEW_PHYS_CODE */
                     case PGMPAGETYPE_RAM:
                         switch (PGM_PAGE_GET_STATE(pPage))
…
                         }
                         break;
-#endif /* VBOX_WITH_NEW_PHYS_CODE */
 
                     case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
…
     }
 
-#ifdef VBOX_WITH_NEW_PHYS_CODE
     /*
      * Finish off any pages pending freeing.
…
     }
     GMMR3FreePagesCleanup(pReq);
-#endif
-
     return VINF_SUCCESS;
…
     {
         pNew = NULL;
-#ifdef VBOX_WITH_NEW_PHYS_CODE
+
         /*
          * Make all the pages in the range MMIO/ZERO pages, freeing any
…
         }
         AssertRCReturn(rc, rc);
-#endif
     }
     else
…
     pNew->pvR3 = NULL;
-#ifndef VBOX_WITH_NEW_PHYS_CODE
-    pNew->paChunkR3Ptrs = NULL;
-#endif
 
     uint32_t iPage = cPages;
…
         {
             fAllMMIO = false;
-#ifdef VBOX_WITH_NEW_PHYS_CODE
             Assert(PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2_ALIAS_MMIO);
             AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
-#endif
             break;
         }
…
     }
 
-#ifdef VBOX_WITH_NEW_PHYS_CODE
     /*
      * Range match? It will all be within one range (see PGMAllHandler.cpp).
…
         break;
     }
-#endif
 
     /* next */
…
     pNew->RamRange.pvR3 = pvPages;
-#ifndef VBOX_WITH_NEW_PHYS_CODE
-    pNew->RamRange.paChunkR3Ptrs = NULL;
-#endif
 
     uint32_t iPage = cPages;
…
 }
 
-#ifndef VBOX_WITH_NEW_PHYS_CODE
-
-/**
- * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
- * registration APIs calls to inform PGM about memory registrations.
- *
- * It registers the physical memory range with PGM. MM is responsible
- * for the toplevel things - allocation and locking - while PGM is taking
- * care of all the details and implements the physical address space virtualization.
- *
- * @returns VBox status.
- * @param   pVM     The VM handle.
- * @param   pvRam   HC virtual address of the RAM range. (page aligned)
- * @param   GCPhys  GC physical address of the RAM range. (page aligned)
- * @param   cb      Size of the RAM range. (page aligned)
- * @param   fFlags  Flags, MM_RAM_*.
- * @param   paPages Pointer an array of physical page descriptors.
- * @param   pszDesc Description string.
- */
-VMMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
-{
-    /*
-     * Validate input.
-     * (Not so important because callers are only MMR3PhysRegister()
-     *  and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
-     */
-    Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
-
-    Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
-    /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
-    Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
-    /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
-    Assert(!(fFlags & ~0xfff));
-    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
-    Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
-    Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
-    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
-    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
-    if (GCPhysLast < GCPhys)
-    {
-        AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
-        return VERR_INVALID_PARAMETER;
-    }
-
-    /*
-     * Find range location and check for conflicts.
-     */
-    PPGMRAMRANGE pPrev = NULL;
-    PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
-    while (pCur)
-    {
-        if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
-        {
-            AssertMsgFailed(("Conflict! This cannot happen!\n"));
-            return VERR_PGM_RAM_CONFLICT;
-        }
-        if (GCPhysLast < pCur->GCPhys)
-            break;
-
-        /* next */
-        pPrev = pCur;
-        pCur = pCur->pNextR3;
-    }
-
-    /*
-     * Allocate RAM range.
-     * Small ranges are allocated from the heap, big ones have separate mappings.
-     */
-    size_t       cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
-    PPGMRAMRANGE pNew;
-    int          rc = VERR_NO_MEMORY;
-    if (cbRam > PAGE_SIZE / 2)
-    {   /* large */
-        cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
-        rc = MMR3HyperAllocOnceNoRel(pVM, cbRam, PAGE_SIZE, MM_TAG_PGM_PHYS, (void **)&pNew);
-        AssertMsgRC(rc, ("MMR3HyperAllocOnceNoRel(,%#x,,) -> %Rrc\n", cbRam, rc));
-    }
-    else
-    {   /* small */
-        rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
-        AssertMsgRC(rc, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, rc));
-    }
-    if (RT_SUCCESS(rc))
-    {
-        /*
-         * Initialize the range.
-         */
-        pNew->pvR3          = pvRam;
-        pNew->GCPhys        = GCPhys;
-        pNew->GCPhysLast    = GCPhysLast;
-        pNew->cb            = cb;
-        pNew->fFlags        = fFlags;
-        pNew->paChunkR3Ptrs = NULL;
-
-        unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
-        if (paPages)
-        {
-            while (iPage-- > 0)
-            {
-                PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
-                              fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
-                              PGM_PAGE_STATE_ALLOCATED);
-                pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
-            }
-        }
-        else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
-        {
-            /* Allocate memory for chunk to HC ptr lookup array. */
-            rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
-            AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Rrc\n", cbRam, cb), rc);
-
-            /* Physical memory will be allocated on demand. */
-            while (iPage-- > 0)
-            {
-                PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
-                pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
-            }
-        }
-        else
-        {
-            Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
-            RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
-            while (iPage-- > 0)
-            {
-                PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
-                pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
-            }
-        }
-
-        /*
-         * Insert the new RAM range.
-         */
-        pgmLock(pVM);
-        pNew->pNextR3 = pCur;
-        pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
-        pNew->pNextRC = pCur ? MMHyperCCToRC(pVM, pCur) : NIL_RTRCPTR;
-        if (pPrev)
-        {
-            pPrev->pNextR3 = pNew;
-            pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
-            pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
-        }
-        else
-        {
-            pVM->pgm.s.pRamRangesR3 = pNew;
-            pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
-            pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
-        }
-        pgmUnlock(pVM);
-    }
-    return rc;
-}
-
-
-/**
- * Register a chunk of a the physical memory range with PGM. MM is responsible
- * for the toplevel things - allocation and locking - while PGM is taking
- * care of all the details and implements the physical address space virtualization.
- *
- *
- * @returns VBox status.
- * @param   pVM     The VM handle.
- * @param   pvRam   HC virtual address of the RAM range. (page aligned)
- * @param   GCPhys  GC physical address of the RAM range. (page aligned)
- * @param   cb      Size of the RAM range. (page aligned)
- * @param   fFlags  Flags, MM_RAM_*.
- * @param   paPages Pointer an array of physical page descriptors.
- * @param   pszDesc Description string.
- */
-VMMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
-{
-    NOREF(pszDesc);
-
-    /*
-     * Validate input.
-     * (Not so important because callers are only MMR3PhysRegister()
-     *  and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
-     */
-    Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
-
-    Assert(paPages);
-    Assert(pvRam);
-    Assert(!(fFlags & ~0xfff));
-    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
-    Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
-    Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
-    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
-    Assert(VM_IS_EMT(pVM));
-    Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
-    Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
-
-    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
-    if (GCPhysLast < GCPhys)
-    {
-        AssertMsgFailed(("The range wraps! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
-        return VERR_INVALID_PARAMETER;
-    }
-
-    /*
-     * Find existing range location.
-     */
-    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
-    while (pRam)
-    {
-        RTGCPHYS off = GCPhys - pRam->GCPhys;
-        if (    off < pRam->cb
-            &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
-            break;
-
-        pRam = pRam->CTX_SUFF(pNext);
-    }
-    AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
-
-    unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
-    unsigned iPage = (unsigned)(cb >> PAGE_SHIFT);
-    if (paPages)
-    {
-        while (iPage-- > 0)
-            pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
-    }
-    off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
-    pRam->paChunkR3Ptrs[off] = (uintptr_t)pvRam;
-
-    /* Notify the recompiler. */
-    REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
-
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Allocate missing physical pages for an existing guest RAM range.
- *
- * @returns VBox status.
- * @param   pVM     The VM handle.
- * @param   GCPhys  GC physical address of the RAM range. (page aligned)
- */
-VMMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
-{
-    RTGCPHYS GCPhys = *pGCPhys;
-
-    /*
-     * Walk range list.
-     */
-    pgmLock(pVM);
-
-    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
-    while (pRam)
-    {
-        RTGCPHYS off = GCPhys - pRam->GCPhys;
-        if (    off < pRam->cb
-            &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
-        {
-            bool     fRangeExists = false;
-            unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
-
-            /* Note: A request made from another thread may end up in EMT after somebody else has already allocated the range. */
-            if (pRam->paChunkR3Ptrs[off])
-                fRangeExists = true;
-
-            pgmUnlock(pVM);
-            if (fRangeExists)
-                return VINF_SUCCESS;
-            return pgmr3PhysGrowRange(pVM, GCPhys);
-        }
-
-        pRam = pRam->CTX_SUFF(pNext);
-    }
-    pgmUnlock(pVM);
-    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
-}
-
-
-/**
- * Allocate missing physical pages for an existing guest RAM range.
- *
- * @returns VBox status.
- * @param   pVM         The VM handle.
- * @param   pRamRange   RAM range
- * @param   GCPhys      GC physical address of the RAM range. (page aligned)
- */
-int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
-{
-    void *pvRam;
-    int   rc;
-
-    /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
-    if (!VM_IS_EMT(pVM))
-    {
-        PVMREQ pReq;
-        const RTGCPHYS GCPhysParam = GCPhys;
-
-        AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
-
-        rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
-        if (RT_SUCCESS(rc))
-        {
-            rc = pReq->iStatus;
-            VMR3ReqFree(pReq);
-        }
-        return rc;
-    }
-
-    /* Round down to chunk boundary */
-    GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
-
-    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DynRamGrow);
-    STAM_COUNTER_ADD(&pVM->pgm.s.StatR3DynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
-
-    Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %RGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
-
-    unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
-
-    for (;;)
-    {
-        rc = SUPPageAlloc(cPages, &pvRam);
-        if (RT_SUCCESS(rc))
-        {
-            rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
-            if (RT_SUCCESS(rc))
-                return rc;
-
-            SUPPageFree(pvRam, cPages);
-        }
-
-        VMSTATE enmVMState = VMR3GetState(pVM);
-        if (enmVMState != VMSTATE_RUNNING)
-        {
-            AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %RGp!\n", GCPhys));
-            LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %RGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
-            return rc;
-        }
-
-        LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
-
-        /* Pause first, then inform Main. */
-        rc = VMR3SuspendNoSave(pVM);
-        AssertRC(rc);
-
-        VMSetRuntimeError(pVM, 0/*fFlags*/, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM");
-
-        /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
-        rc = VMR3WaitForResume(pVM);
-
-        /* Retry */
-        LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
-    }
-}
-
-
-/**
- * Interface MMR3RomRegister() and MMR3PhysReserve calls to update the
- * flags of existing RAM ranges.
- *
- * @returns VBox status.
- * @param   pVM     The VM handle.
- * @param   GCPhys  GC physical address of the RAM range. (page aligned)
- * @param   cb      Size of the RAM range. (page aligned)
- * @param   fFlags  The Or flags, MM_RAM_* \#defines.
- * @param   fMask   The and mask for the flags.
- */
-VMMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
-{
-    Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
-
-    /*
-     * Validate input.
-     * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
-     */
-    Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
-    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
-    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
-    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
-    AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
-
-    /*
-     * Lookup the range.
-     */
-    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
-    while (pRam && GCPhys > pRam->GCPhysLast)
-        pRam = pRam->CTX_SUFF(pNext);
-    if (    !pRam
-        ||  GCPhys > pRam->GCPhysLast
-        ||  GCPhysLast < pRam->GCPhys)
-    {
-        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
-        return VERR_INVALID_PARAMETER;
-    }
-
-    /*
-     * Update the requested flags.
-     */
-    RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
-                       | fMask;
-    unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
-    unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
-    for ( ; iPage < iPageEnd; iPage++)
-        pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
-
-    return VINF_SUCCESS;
-}
-
-#endif /* !VBOX_WITH_NEW_PHYS_CODE */
 
 /**
…
     if (RT_SUCCESS(rc))
     {
-#ifdef VBOX_WITH_NEW_PHYS_CODE
         if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
             rc = VINF_SUCCESS;
…
 
         /* else: handler catching all access, no pointer returned. */
-
-#else
-        if (0)
-            /* nothing */;
-        else if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
-        {
-            if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
-                rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
-            else if (fWritable && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-                rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
-            else
-            {
-                /* Temporariliy disabled phycial handler(s), since the recompiler
-                   doesn't get notified when it's reset we'll have to pretend its
-                   operating normally. */
-                if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
-                    rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
-                else
-                    rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
-            }
-        }
-        else
-            rc = VINF_SUCCESS;
-        if (RT_SUCCESS(rc))
-        {
-            if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
-            {
-                AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
-                RTGCPHYS off = GCPhys - pRam->GCPhys;
-                unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
-                *ppv = (void *)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
-            }
-            else if (RT_LIKELY(pRam->pvR3))
-            {
-                AssertMsg(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, ("GCPhys=%RGp type=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage)));
-                RTGCPHYS off = GCPhys - pRam->GCPhys;
-                *ppv = (uint8_t *)pRam->pvR3 + off;
-            }
-            else
-                rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
-        }
-#endif /* !VBOX_WITH_NEW_PHYS_CODE */
     }
     else
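For context, the external mapping APIs touched in this diff (PGMR3PhysGCPhys2CCPtrExternal and PGMR3PhysGCPhys2CCPtrReadOnlyExternal) follow a map/use/release pattern built around PGMPAGEMAPLOCK. The sketch below is illustrative only, not part of the changeset: it assumes PGMPhysReleasePageMappingLock() is the matching release call, that the returned pointer corresponds to GCPhys itself, and the helper name readGuestU32() is invented for the example.

    #include <VBox/vm.h>    /* assumed headers for PVM and the PGM APIs */
    #include <VBox/pgm.h>

    /* Hypothetical helper: read a 32-bit value from guest physical memory.
       Assumes GCPhys is 4-byte aligned so the read stays within one page. */
    static int readGuestU32(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32)
    {
        void const     *pv;
        PGMPAGEMAPLOCK  Lock;
        /* Map the guest page read-only; the lock pins the mapping. */
        int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, &pv, &Lock);
        if (RT_SUCCESS(rc))
        {
            *pu32 = *(uint32_t const *)pv;             /* pv corresponds to GCPhys */
            PGMPhysReleasePageMappingLock(pVM, &Lock); /* always pair with the map call */
        }
        return rc;
    }

The lock is what makes the pointer safe to use from a non-EMT thread: the page cannot be freed or remapped between the map call and the release, which is the contract the now-unconditional pgmLock/pgmUnlock paths in the diff enforce.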