Changeset 5106 in vbox for trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
- Timestamp: Sep 28, 2007 6:31:29 PM (18 years ago)
- svn:sync-xref-src-repo-rev: 24949
- File: 1 edited
Legend:
- Unmodified (no prefix)
- Added (+)
- Removed (-)
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
r5086 → r5106

@@ -147 +147 @@
 #include <VBox/log.h>
 #include <VBox/param.h>
+#include <VBox/err.h>
 #include <iprt/avl.h>
 #include <iprt/mem.h>
@@ -152 +153 @@
 #include <iprt/semaphore.h>
 #include <iprt/string.h>
+
+/** @def GMM_MAX_GCPHYS
+ * The max guest physical address.
+ * This must reflect the constraints imposed by the RTGCPHYS type and the guest
+ * page frame number used internally in GMMPAGE. */
+#define GMM_MAX_GCPHYS UINT32_MAX
 
 
@@ -792 +799 @@
  * @param   enmPolicy       The OC policy to use on this VM.
  * @param   enmPriority     The priority in an out-of-memory situation.
+ *
+ * @thread  The creator thread / EMT.
  */
 GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
@@ -807 +816 @@
     if (!pGVM)
         return VERR_INVALID_PARAMETER;
+    if (pGVM->hEMT != RTThreadNativeSelf())
+        return VERR_NOT_OWNER;
 
     AssertReturn(cBasePages, VERR_INVALID_PARAMETER);
@@ -851 +862 @@
 
 /**
+ * VMMR0 request wrapper for GMMR0InitialReservation.
+ *
+ * @returns see GMMR0InitialReservation.
+ * @param   pVM             Pointer to the shared VM structure.
+ * @param   pReq            The request packet.
+ */
+GMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, PGMMINITIALRESERVATIONREQ pReq)
+{
+    /*
+     * Validate input and pass it on.
+     */
+    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
+    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
+    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
+
+    return GMMR0InitialReservation(pVM, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
+}
+
+
+/**
  * This updates the memory reservation with the additional MMIO2 and ROM pages.
  *
@@ -864 +895 @@
  * @param   enmPolicy       The OC policy to use on this VM.
  * @param   enmPriority     The priority in an out-of-memory situation.
+ *
+ * @thread  EMT.
  */
 GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
@@ -878 +911 @@
     if (!pGVM)
         return VERR_INVALID_PARAMETER;
+    if (pGVM->hEMT != RTThreadNativeSelf())
+        return VERR_NOT_OWNER;
 
     AssertReturn(cBasePages, VERR_INVALID_PARAMETER);
@@ -919 +954 @@
 
 /**
+ * VMMR0 request wrapper for GMMR0UpdateReservation.
+ *
+ * @returns see GMMR0UpdateReservation.
+ * @param   pVM             Pointer to the shared VM structure.
+ * @param   pReq            The request packet.
+ */
+GMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, PGMMUPDATERESERVATIONREQ pReq)
+{
+    /*
+     * Validate input and pass it on.
+     */
+    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
+    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
+    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
+
+    return GMMR0UpdateReservation(pVM, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
+}
+
+
+/**
+ * Finds a page.
+ *
+ * @returns Pointer to the page, NULL if not found.
+ * @param   pGMM        Pointer to the GMM instance.
+ * @param   idPage      The ID of the page to find.
+ */
+DECLINLINE(PGMMPAGE) gmmR0GetPage(PGMM pGMM, uint32_t idPage)
+{
+    return NULL;
+}
+
+
+
+/**
+ * Common worker for GMMR0AllocateHandyPages and GMMR0AllocatePages.
+ *
+ * @returns VBox status code:
+ * @retval  xxx
+ *
+ * @param   pVM         Pointer to the shared VM structure.
+ * @param   cPages      The number of pages to allocate.
+ * @param   paPages     Pointer to the page descriptors.
+ *                      See GMMPAGEDESC for details on what is expected on input.
+ * @param   enmAccount  The account to charge.
+ */
+static int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
+{
+
+    return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
  * Updates the previous allocations and allocates more pages.
  *
@@ -931 +1019 @@
  * @param   paPages         The array of page descriptors.
  *                          See GMMPAGEDESC for details on what is expected on input.
+ * @thread  EMT.
  */
 GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
 {
-    return VERR_NOT_IMPLEMENTED;
+    /*
+     * Validate, get basics and take the semaphore.
+     * (This is a relatively busy path, so make predictions where possible.)
+     */
+    PGMM pGMM;
+    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
+    PGVM pGVM = GVMMR0ByVM(pVM);
+    if (RT_UNLIKELY(!pGVM))
+        return VERR_INVALID_PARAMETER;
+    if (RT_UNLIKELY(pGVM->hEMT != RTThreadNativeSelf()))
+        return VERR_NOT_OWNER;
+
+    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(   (cPagesToUpdate && cPagesToUpdate < 1024)
+                    || (cPagesToAlloc  && cPagesToAlloc  < 1024),
+                    ("cPagesToUpdate=%#x cPagesToAlloc=%#x\n", cPagesToUpdate, cPagesToAlloc),
+                    VERR_INVALID_PARAMETER);
+
+    unsigned iPage = 0;
+    for (; iPage < cPagesToUpdate; iPage++)
+    {
+        AssertMsgReturn(   (   paPages[iPage].HCPhysGCPhys < GMM_MAX_GCPHYS
+                            && !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK))
+                        || paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS
+                        || paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHARABLE,
+                        ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys),
+                        VERR_INVALID_PARAMETER);
+        AssertMsgReturn(   paPages[iPage].idPage <= GMM_PAGEID_LAST
+                        /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
+                        ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
+        AssertMsgReturn(   paPages[iPage].idSharedPage <= GMM_PAGEID_LAST
+                        /*|| paPages[iPage].idSharedPage == NIL_GMM_PAGEID*/,
+                        ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
+    }
+
+    for (; iPage < cPagesToAlloc; iPage++)
+    {
+        AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
+        AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
+        AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
+    }
+
+    int rc = RTSemFastMutexRequest(pGMM->Mtx);
+    AssertRC(rc);
+
+    /* No allocations before the initial reservation has been made! */
+    if (RT_LIKELY(   pGVM->gmm.s.Reserved.cBasePages
+                  && pGVM->gmm.s.Reserved.cFixedPages
+                  && pGVM->gmm.s.Reserved.cShadowPages))
+    {
+        /*
+         * Do the updates.
+         */
+        for (iPage = 0; iPage < cPagesToUpdate; iPage++)
+        {
+            if (paPages[iPage].idPage != NIL_GMM_PAGEID)
+            {
+                PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idPage);
+                if (RT_LIKELY(pPage))
+                {
+
+                    paPages[iPage].idPage = NIL_GMM_PAGEID;
+                    paPages[iPage].HCPhysGCPhys = NIL_RTHCPHYS;
+                }
+                else
+                    rc = VERR_GMM_PAGE_NOT_FOUND;
+            }
+
+            if (paPages[iPage].idSharedPage != NIL_GMM_PAGEID)
+            {
+                PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idSharedPage);
+                if (RT_LIKELY(pPage))
+                {
+
+                    paPages[iPage].idSharedPage = NIL_GMM_PAGEID;
+                }
+                else
+                    rc = VERR_GMM_PAGE_NOT_FOUND;
+            }
+        }
+
+        /*
+         * And now do the allocation.
+         */
+        if (RT_SUCCESS(rc))
+            rc = gmmR0AllocatePages(pGMM, pGVM, cPagesToAlloc, paPages, GMMACCOUNT_BASE);
+    }
+    else
+        rc = VERR_WRONG_ORDER;
+
+    RTSemFastMutexRelease(pGMM->Mtx);
+    LogFlow(("GMMR0AllocateHandyPages: returns %Rrc\n", rc));
+    return rc;
 }
 
@@ -951 +1132 @@
  *                          See GMMPAGEDESC for details on what is expected on input.
  * @param   enmAccount      The account to charge.
+ *
+ * @thread  EMT.
  */
 GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
 {
-    return VERR_NOT_IMPLEMENTED;
+    LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
+
+    /*
+     * Validate, get basics and take the semaphore.
+     */
+    PGMM pGMM;
+    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
+    PGVM pGVM = GVMMR0ByVM(pVM);
+    if (!pGVM)
+        return VERR_INVALID_PARAMETER;
+    if (pGVM->hEMT != RTThreadNativeSelf())
+        return VERR_NOT_OWNER;
+
+    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
+    AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
+    AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
+
+    for (unsigned iPage = 0; iPage < cPages; iPage++)
+    {
+        AssertMsgReturn(   paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS
+                        || paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHARABLE
+                        || (   enmAccount == GMMACCOUNT_BASE
+                            && paPages[iPage].HCPhysGCPhys < GMM_MAX_GCPHYS
+                            && !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK)),
+                        ("#%#x: %RHp enmAccount=%d\n", iPage, paPages[iPage].HCPhysGCPhys, enmAccount),
+                        VERR_INVALID_PARAMETER);
+        AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
+        AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
+    }
+
+    int rc = RTSemFastMutexRequest(pGMM->Mtx);
+    AssertRC(rc);
+
+    /* No allocations before the initial reservation has been made! */
+    if (   pGVM->gmm.s.Reserved.cBasePages
+        && pGVM->gmm.s.Reserved.cFixedPages
+        && pGVM->gmm.s.Reserved.cShadowPages)
+        rc = gmmR0AllocatePages(pGMM, pGVM, cPages, paPages, enmAccount);
+    else
+        rc = VERR_WRONG_ORDER;
+
+    RTSemFastMutexRelease(pGMM->Mtx);
+    LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
+    return rc;
+}
+
+
+/**
+ * VMMR0 request wrapper for GMMR0AllocatePages.
+ *
+ * @returns see GMMR0AllocatePages.
+ * @param   pVM             Pointer to the shared VM structure.
+ * @param   pReq            The request packet.
+ */
+GMMR0DECL(int) GMMR0AllocatePagesReq(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
+{
+    /*
+     * Validate input and pass it on.
+     */
+    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
+    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
+    AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0]),
+                    ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0])),
+                    VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages]),
+                    ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages])),
+                    VERR_INVALID_PARAMETER);
+
+    return GMMR0AllocatePages(pVM, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
 }
 
@@ -970 +1221 @@
  * @param   paPages         Pointer to the page descriptors containing the Page IDs for each page.
  * @param   enmAccount      The account this relates to.
+ * @thread  EMT.
  */
 GMMR0DECL(int) GMMR0FreePages(PVM pVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
 {
     return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * VMMR0 request wrapper for GMMR0FreePages.
+ *
+ * @returns see GMMR0FreePages.
+ * @param   pVM             Pointer to the shared VM structure.
+ * @param   pReq            The request packet.
+ */
+GMMR0DECL(int) GMMR0FreePagesReq(PVM pVM, PGMMFREEPAGESREQ pReq)
+{
+    /*
+     * Validate input and pass it on.
+     */
+    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
+    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
+    AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0]),
+                    ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0])),
+                    VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages]),
+                    ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages])),
+                    VERR_INVALID_PARAMETER);
+
+    return GMMR0FreePages(pVM, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
 }
 
@@ -989 +1266 @@
  * @param   cPagesToFree    The number of pages to be freed.
  * @param   paPages         Pointer to the page descriptors for the pages that's to be freed.
+ * @thread  EMT.
  */
 GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, uint32_t cBalloonedPages, uint32_t cPagesToFree, PGMMFREEPAGEDESC paPages)
@@ -996 +1274 @@
 
 
+/**
+ * VMMR0 request wrapper for GMMR0BalloonedPages.
+ *
+ * @returns see GMMR0BalloonedPages.
+ * @param   pVM             Pointer to the shared VM structure.
+ * @param   pReq            The request packet.
+ */
+GMMR0DECL(int) GMMR0BalloonedPagesReq(PVM pVM, PGMMBALLOONEDPAGESREQ pReq)
+{
+    /*
+     * Validate input and pass it on.
+     */
+    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
+    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
+    AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMBALLOONEDPAGESREQ, aPages[0]),
+                    ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMBALLOONEDPAGESREQ, aPages[0])),
+                    VERR_INVALID_PARAMETER);
+    AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMBALLOONEDPAGESREQ, aPages[pReq->cPagesToFree]),
+                    ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMBALLOONEDPAGESREQ, aPages[pReq->cPagesToFree])),
+                    VERR_INVALID_PARAMETER);
+
+    return GMMR0BalloonedPages(pVM, pReq->cBalloonedPages, pReq->cPagesToFree, &pReq->aPages[0]);
+}
+
+
+GMMR0DECL(int) GMMR0FreeMapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR pvR3)
+{
+    return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * VMMR0 request wrapper for GMMR0FreeMapUnmapChunk.
+ *
+ * @returns see GMMR0FreeMapUnmapChunk.
+ * @param   pVM             Pointer to the shared VM structure.
+ * @param   pReq            The request packet.
+ */
+GMMR0DECL(int) GMMR0FreeMapUnmapChunkReq(PVM pVM, PGMMMAPUNMAPCHUNKREQ pReq)
+{
+    /*
+     * Validate input and pass it on.
+     */
+    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
+    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
+    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
+
+    return GMMR0FreeMapUnmapChunk(pVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
+}
+
+
+GMMR0DECL(int) GMMR0SeedChunk(PVM pVM, RTR3PTR pvR3)
+{
+    return VERR_NOT_IMPLEMENTED;
+}
+
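A recurring idiom in the new *Req wrappers above is the request-header size check: a fixed-size packet must report exactly sizeof(*pReq) in Hdr.cbReq, while a packet that ends in a variable-length aPages[] array must report at least the offset of aPages[0] and exactly the offset of aPages[cPages]. The stand-alone sketch below re-creates that idiom with hypothetical types; EXAMPLEREQHDR, EXAMPLEFIXEDREQ, EXAMPLEARRAYREQ and the EX_* constants are illustrations only, not VirtualBox definitions or the VMM's actual request layout.

/* Illustrative only: the size-check idiom of the *Req wrappers, rebuilt with
 * hypothetical request types so it compiles on its own. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EX_OK                    0
#define EX_ERR_INVALID_PARAMETER (-2)

typedef struct EXAMPLEREQHDR
{
    uint32_t cbReq;     /* Size of the whole request in bytes, set by the caller. */
} EXAMPLEREQHDR;

/* A fixed-size request, validated like the reservation requests above. */
typedef struct EXAMPLEFIXEDREQ
{
    EXAMPLEREQHDR Hdr;
    uint64_t      cBasePages;
} EXAMPLEFIXEDREQ;

/* A variable-size request ending in an array, validated like the page requests above. */
typedef struct EXAMPLEARRAYREQ
{
    EXAMPLEREQHDR Hdr;
    uint32_t      cPages;
    uint32_t      aPages[1];    /* Really cPages entries. */
} EXAMPLEARRAYREQ;

static int exampleValidateFixed(const EXAMPLEFIXEDREQ *pReq)
{
    /* Fixed-size packets must match exactly. */
    if (pReq->Hdr.cbReq != sizeof(*pReq))
        return EX_ERR_INVALID_PARAMETER;
    return EX_OK;
}

static int exampleValidateArray(const EXAMPLEARRAYREQ *pReq)
{
    /* Must be large enough to read cPages at all... */
    size_t const offArray = offsetof(EXAMPLEARRAYREQ, aPages);
    if (pReq->Hdr.cbReq < offArray)
        return EX_ERR_INVALID_PARAMETER;
    /* ...and then exactly large enough for the cPages entries it declares. */
    if (pReq->Hdr.cbReq != offArray + pReq->cPages * sizeof(pReq->aPages[0]))
        return EX_ERR_INVALID_PARAMETER;
    return EX_OK;
}

int main(void)
{
    EXAMPLEFIXEDREQ FixedReq = { { sizeof(FixedReq) }, 1024 };
    EXAMPLEARRAYREQ ArrayReq = { { offsetof(EXAMPLEARRAYREQ, aPages) + 1 * sizeof(uint32_t) }, 1, { 42 } };
    printf("fixed: %d, array: %d\n", exampleValidateFixed(&FixedReq), exampleValidateArray(&ArrayReq));
    return 0;
}

The point of the two-step check on the array variant is that the element count is read out of the packet itself, so the header length and the declared count have to agree before any element is touched.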
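The validation loop in GMMR0AllocatePages also spells out the on-input contract for the page descriptors in the simplest case: a brand new allocation passes NIL_RTHCPHYS (or GMM_GCPHYS_UNSHARABLE) in HCPhysGCPhys and NIL_GMM_PAGEID in both ID fields, and the allocator fills the descriptors in on success. The sketch below mirrors just that preparation step with a hypothetical, simplified descriptor; EXAMPLEPAGEDESC and the EX_NIL_* stand-ins are not the real GMMPAGEDESC, and a real caller would only hand such an array to the allocator after the initial reservation has been made, since the code above otherwise returns VERR_WRONG_ORDER.

/* Illustrative only: preparing allocation descriptors the way the validation
 * loop above expects them, using hypothetical stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define EX_NIL_HCPHYS UINT64_MAX    /* Stand-in for NIL_RTHCPHYS. */
#define EX_NIL_PAGEID UINT32_MAX    /* Stand-in for NIL_GMM_PAGEID. */

typedef struct EXAMPLEPAGEDESC
{
    uint64_t HCPhysGCPhys;  /* In: NIL for a new allocation; out: filled in by the allocator. */
    uint32_t idPage;        /* In: NIL; out: the ID of the allocated page. */
    uint32_t idSharedPage;  /* In: NIL; out: NIL or a shared page ID. */
} EXAMPLEPAGEDESC;

/* Initialize cPages descriptors so they pass the input checks in GMMR0AllocatePages. */
static void examplePrepareDescriptors(EXAMPLEPAGEDESC *paPages, uint32_t cPages)
{
    for (uint32_t iPage = 0; iPage < cPages; iPage++)
    {
        paPages[iPage].HCPhysGCPhys = EX_NIL_HCPHYS;
        paPages[iPage].idPage       = EX_NIL_PAGEID;
        paPages[iPage].idSharedPage = EX_NIL_PAGEID;
    }
}

int main(void)
{
    EXAMPLEPAGEDESC aPages[4];
    examplePrepareDescriptors(aPages, 4);
    /* A real caller would now invoke the allocator; here we just show the prepared state. */
    printf("first descriptor: %#llx %#x %#x\n",
           (unsigned long long)aPages[0].HCPhysGCPhys,
           (unsigned)aPages[0].idPage, (unsigned)aPages[0].idSharedPage);
    return 0;
}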
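Several entry points also gain an ownership check: if pGVM->hEMT is not the calling thread, they bail out with VERR_NOT_OWNER, so only the VM's EMT may reach the code behind it. The portable sketch below re-creates that idea with POSIX threads and hypothetical names (EXAMPLEVM and exampleOnlyOwner are illustrations, not VirtualBox code); build it with -pthread.

/* Illustrative only: rejecting callers that are not the owning thread,
 * mirroring the new hEMT checks with POSIX threads. */
#include <pthread.h>
#include <stdio.h>

#define EX_OK            0
#define EX_ERR_NOT_OWNER (-1)

typedef struct EXAMPLEVM
{
    pthread_t hEMT;     /* The thread that owns this VM's emulation. */
} EXAMPLEVM;

/* Only the owning thread may proceed, everyone else is turned away. */
static int exampleOnlyOwner(EXAMPLEVM *pVM)
{
    if (!pthread_equal(pVM->hEMT, pthread_self()))
        return EX_ERR_NOT_OWNER;
    return EX_OK;
}

static void *exampleOtherThread(void *pvUser)
{
    /* Called from a foreign thread: expected to be rejected. */
    printf("other thread: %d\n", exampleOnlyOwner((EXAMPLEVM *)pvUser));
    return NULL;
}

int main(void)
{
    EXAMPLEVM Vm = { pthread_self() };  /* main() plays the role of the EMT. */
    printf("owner thread: %d\n", exampleOnlyOwner(&Vm));

    pthread_t hThread;
    pthread_create(&hThread, NULL, exampleOtherThread, &Vm);
    pthread_join(hThread, NULL);
    return 0;
}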