Changeset 82589 in vbox for trunk/src/VBox/Runtime/r0drv
- Timestamp: Dec 16, 2019, 5:07:01 PM (5 years ago)
- svn:sync-xref-src-repo-rev: 135474
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp
r78314 r82589 78 78 /** Array of MDL pointers. (variable size) */ 79 79 PMDL apMdls[1]; 80 } RTR0MEMOBJNT, *PRTR0MEMOBJNT; 80 } RTR0MEMOBJNT; 81 /** Pointer to the NT version of the memory object structure. */ 82 typedef RTR0MEMOBJNT *PRTR0MEMOBJNT; 81 83 82 84 … … 114 116 case RTR0MEMOBJTYPE_PAGE: 115 117 Assert(pMemNt->Core.pv); 116 if (g_pfnrtExFreePoolWithTag) 117 g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG); 118 if (pMemNt->fAllocatedPagesForMdl) 119 { 120 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]); 121 Assert(pMemNt->pvSecureMem == NULL); 122 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]); 123 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]); 124 ExFreePool(pMemNt->apMdls[0]); 125 } 118 126 else 119 ExFreePool(pMemNt->Core.pv); 127 { 128 if (g_pfnrtExFreePoolWithTag) 129 g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG); 130 else 131 ExFreePool(pMemNt->Core.pv); 132 133 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]); 134 IoFreeMdl(pMemNt->apMdls[0]); 135 } 120 136 pMemNt->Core.pv = NULL; 121 122 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);123 IoFreeMdl(pMemNt->apMdls[0]);124 137 pMemNt->apMdls[0] = NULL; 125 138 pMemNt->cMdls = 0; … … 231 244 232 245 /* 246 * Use MmAllocatePagesForMdl if the allocation is a little bit big. 
247 */ 248 int rc = VERR_NO_PAGE_MEMORY; 249 if ( cb > _1M 250 && g_pfnrtMmAllocatePagesForMdl 251 && g_pfnrtMmFreePagesFromMdl 252 && g_pfnrtMmMapLockedPagesSpecifyCache) 253 { 254 PHYSICAL_ADDRESS Zero; 255 Zero.QuadPart = 0; 256 PHYSICAL_ADDRESS HighAddr; 257 HighAddr.QuadPart = MAXLONGLONG; 258 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb); 259 if (pMdl) 260 { 261 if (MmGetMdlByteCount(pMdl) >= cb) 262 { 263 __try 264 { 265 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */, 266 FALSE /* no bug check on failure */, NormalPagePriority); 267 if (pv) 268 { 269 #ifdef RT_ARCH_AMD64 270 if (fExecutable) 271 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE); 272 #endif 273 274 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb); 275 if (pMemNt) 276 { 277 pMemNt->fAllocatedPagesForMdl = true; 278 pMemNt->cMdls = 1; 279 pMemNt->apMdls[0] = pMdl; 280 *ppMem = &pMemNt->Core; 281 return VINF_SUCCESS; 282 } 283 MmUnmapLockedPages(pv, pMdl); 284 } 285 } 286 __except(EXCEPTION_EXECUTE_HANDLER) 287 { 288 # ifdef LOG_ENABLED 289 NTSTATUS rcNt = GetExceptionCode(); 290 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt)); 291 # endif 292 /* nothing */ 293 } 294 } 295 g_pfnrtMmFreePagesFromMdl(pMdl); 296 ExFreePool(pMdl); 297 } 298 } 299 300 /* 233 301 * Try allocate the memory and create an MDL for them so 234 302 * we can query the physical addresses and do mappings later 235 303 * without running into out-of-memory conditions and similar problems. 
236 304 */ 237 int rc = VERR_NO_PAGE_MEMORY;238 305 void *pv; 239 306 if (g_pfnrtExAllocatePoolWithTag) … … 248 315 MmBuildMdlForNonPagedPool(pMdl); 249 316 #ifdef RT_ARCH_AMD64 250 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE); 317 if (fExecutable) 318 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE); 251 319 #endif 252 320 … … 397 465 MmBuildMdlForNonPagedPool(pMdl); 398 466 #ifdef RT_ARCH_AMD64 399 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE); 467 if (fExecutable) 468 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE); 400 469 #endif 401 470
Note: See TracChangeset for help on using the changeset viewer.