Changeset 28317 in vbox
- Timestamp: Apr 14, 2010 6:06:05 PM (15 years ago)
- Location: trunk/src
- Files: 24 edited
trunk/src/VBox/Additions/common/VBoxService/VBoxServiceBalloon.cpp
(r28048 → r28317)

@@ -69 +69 @@
     void *pv = RTMemPageAlloc(PAGE_SIZE);
     g_fSysMadviseWorks = madvise(pv, PAGE_SIZE, MADV_DONTFORK) == 0;
-    RTMemPageFree(pv);
+    RTMemPageFree(pv, PAGE_SIZE);
 #endif
 }
@@ -148 +148 @@
 #else
 
-    RTMemPageFree(pu8);
+    RTMemPageFree(pu8, cb);
 
 #endif
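The hunks above are the first of many call sites updated for the new RTMemPageFree() signature, which now takes the number of bytes that were originally requested. A minimal sketch of the resulting calling convention (illustrative only, not part of the changeset; the headers and the example function are assumptions):

    /* Illustrative sketch -- not from the changeset.  The caller keeps the size
     * it passed to RTMemPageAlloc() and hands the same value to RTMemPageFree(). */
    #include <iprt/mem.h>     /* RTMemPageAlloc, RTMemPageFree */
    #include <iprt/param.h>   /* PAGE_SIZE */

    static void exampleRoundTrip(void)
    {
        size_t const cb = 4 * PAGE_SIZE;    /* hypothetical request size */
        void *pv = RTMemPageAlloc(cb);
        if (pv)
        {
            /* ... use the pages ... */
            RTMemPageFree(pv, cb);          /* the size is now required on free */
        }
    }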
trunk/src/VBox/Additions/common/VBoxService/VBoxServiceClipboard-os2.cpp
(r21218 → r28317)

@@ -423 +423 @@
      */
     uint32_t cb = _4K;
-    int rc = VERR_NO_MEMORY;
-    void *pv = RTMemPageAllocZ(cb);
+    uint32_t cbAllocated = cb;
+    int rc = VERR_NO_MEMORY;
+    void *pv = RTMemPageAllocZ(cbAllocated);
     if (pv)
     {
@@ -431 +432 @@
         if (rc == VINF_BUFFER_OVERFLOW)
         {
-            RTMemPageFree(pv);
-            cb = RT_ALIGN_32(cb, PAGE_SIZE);
-            pv = RTMemPageAllocZ(cb);
+            RTMemPageFree(pv, cbAllocated);
+            cbAllocated = cb = RT_ALIGN_32(cb, PAGE_SIZE);
+            pv = RTMemPageAllocZ(cbAllocated);
             rc = VbglR3ClipboardReadData(g_u32ClientId, fFormat, pv, cb, &cb);
         }
         if (RT_FAILURE(rc))
-            RTMemPageFree(pv);
+            RTMemPageFree(pv, cbAllocated);
     }
     if (RT_SUCCESS(rc))
@@ -458 +459 @@
         }
     }
-    RTMemPageFree(pv);
+    RTMemPageFree(pv, cbAllocated);
 }
 else
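The clipboard change also introduces a separate cbAllocated because VbglR3ClipboardReadData() reuses cb as an in/out parameter, so cb may no longer equal the allocated size by the time the buffer is freed. A reduced sketch of that pattern (illustrative only; readSomething() is a made-up stand-in for the in/out API):

    /* Illustrative sketch -- not from the changeset. */
    #include <iprt/mem.h>
    #include <stdint.h>

    /* Made-up stand-in for an API that reuses its size argument as in/out. */
    static int readSomething(void *pv, uint32_t cbBuf, uint32_t *pcbData)
    {
        (void)pv; (void)cbBuf;
        *pcbData = 12;                      /* pretend 12 bytes of data came back */
        return 0;
    }

    static void exampleSeparateSizes(void)
    {
        uint32_t cb          = 4096;        /* in/out: buffer size, then data size */
        uint32_t cbAllocated = cb;          /* size actually handed to the allocator */
        void *pv = RTMemPageAllocZ(cbAllocated);
        if (pv)
        {
            readSomething(pv, cb, &cb);     /* cb may change here */
            RTMemPageFree(pv, cbAllocated); /* free with the allocated size, not cb */
        }
    }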
trunk/src/VBox/HostDrivers/Support/SUPLib.cpp
(r26430 → r28317)

@@ -1184 +1184 @@
     if (RT_UNLIKELY(g_u32FakeMode))
     {
-        RTMemPageFree(pvPages);
+        RTMemPageFree(pvPages, cPages * PAGE_SIZE);
         return VINF_SUCCESS;
     }
 
     /*
-     * Try normal free first, then if it fails check if we're using the fallback.
+     * Try normal free first, then if it fails check if we're using the fallback
      * for the allocations without kernel mappings and attempt unlocking it.
      */
@@ -1282 +1282 @@
     if (RT_UNLIKELY(g_u32FakeMode))
     {
-        RTMemPageFree(pv);
+        RTMemPageFree(pv, cPages * PAGE_SIZE);
         return VINF_SUCCESS;
     }
@@ -1385 +1385 @@
     if (RT_UNLIKELY(g_u32FakeMode))
     {
-        RTMemPageFree(pv);
+        RTMemPageFree(pv, cPages * PAGE_SIZE);
         return VINF_SUCCESS;
     }
trunk/src/VBox/HostDrivers/Support/freebsd/SUPLib-freebsd.cpp
(r22077 → r28317)

@@ -186 +186 @@
 
 
-int suplibOsPageFree(PSUPLIBDATA pThis, void *pvPages, size_t /* cPages */)
+int suplibOsPageFree(PSUPLIBDATA pThis, void *pvPages, size_t cPages)
 {
     NOREF(pThis);
-    RTMemPageFree(pvPages);
+    RTMemPageFree(pvPages, cPages * PAGE_SIZE);
     return VINF_SUCCESS;
 }
trunk/src/VBox/Runtime/Makefile.kmk
(r28303 → r28317)

@@ -1657 +1657 @@
 RuntimeRC_INCS = include
 RuntimeRC_SOURCES = \
+        common/checksum/crc32.cpp \
+        common/checksum/crc64.cpp \
+        common/checksum/md5.cpp \
         common/log/log.cpp \
         common/log/logellipsis.cpp \
trunk/src/VBox/Runtime/common/alloc/memcache.cpp
(r26525 → r28317)

@@ -252 +252 @@
     }
 
-    RTMemPageFree(pPage);
+    RTMemPageFree(pPage, PAGE_SIZE);
 
 
trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp
(r28271 → r28317)

@@ -48 +48 @@
 #include "internal/magics.h"
 #include "internal/thread.h"
 
+
 /*******************************************************************************
trunk/src/VBox/Runtime/r3/alloc-ef.cpp
(r28298 → r28317)

@@ -363 +363 @@
         }
         rtmemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
-        RTMemPageFree(pvBlock);
+        RTMemPageFree(pvBlock, cbBlock);
     }
     else
@@ -456 +456 @@
     rc = RTMemProtect(pvBlock, cbBlock, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
     if (RT_SUCCESS(rc))
-        RTMemPageFree(pvBlock);
+        RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
     else
         rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvBlock, cbBlock, rc);
@@ -479 +479 @@
     int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
     if (RT_SUCCESS(rc))
-        RTMemPageFree(pvBlock);
+        RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
     else
         rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
trunk/src/VBox/Runtime/r3/darwin/alloc-darwin.cpp
(r27492 → r28317)

@@ -126 +126 @@
  * NULL will be ignored.
  */
-RTDECL(void) RTMemPageFree(void *pv) RT_NO_THROW
+RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW
 {
     if (pv)
trunk/src/VBox/Runtime/r3/freebsd/alloc-freebsd.cpp
(r21292 → r28317)

@@ -84 +84 @@
 RTDECL(void) RTMemExecFree(void *pv) RT_NO_THROW
 {
-    return RTMemPageFree(pv);
+    if (pv)
+        free(pv);
 }
 
@@ -129 +130 @@
  * NULL will be ignored.
  */
-RTDECL(void) RTMemPageFree(void *pv) RT_NO_THROW
+RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW
 {
     if (pv)
trunk/src/VBox/Runtime/r3/posix/alloc-posix.cpp
(r28311 → r28317)

@@ -44 +44 @@
 #include <sys/mman.h>
 
-#if !defined(RT_USE_MMAP) && (defined(RT_OS_LINUX))
-# define RT_USE_MMAP
-#endif
+
+/*******************************************************************************
+*   Defined Constants And Macros                                               *
+*******************************************************************************/
+#if !defined(RT_USE_MMAP_EXEC) && (defined(RT_OS_LINUX))
+# define RT_USE_MMAP_EXEC
+#endif
+
+#if !defined(RT_USE_MMAP_PAGE) && 0 /** @todo mmap is too slow for full scale EF setup. */
+# define RT_USE_MMAP_PAGE
+#endif
+
 
 /*******************************************************************************
 *   Structures and Typedefs                                                    *
 *******************************************************************************/
-#ifdef RT_USE_MMAP
+#ifdef RT_USE_MMAP_EXEC
 /**
  * RTMemExecAlloc() header used when using mmap for allocating the memory.
@@ -67 +76 @@
 
 /** Magic for RTMEMEXECHDR. */
-# define RTMEMEXECHDR_MAGIC (~(size_t)0xfeedbabe)
-
-#endif /* RT_USE_MMAP */
+# define RTMEMEXECHDR_MAGIC (~(size_t)0xfeedbabe)
+
+#endif /* RT_USE_MMAP_EXEC */
 
 
@@ -84 +93 @@
     AssertMsg(cb, ("Allocating ZERO bytes is really not a good idea! Good luck with the next assertion!\n"));
 
-#ifdef RT_USE_MMAP
+#ifdef RT_USE_MMAP_EXEC
     /*
      * Use mmap to get low memory.
@@ -90 +99 @@
     size_t cbAlloc = RT_ALIGN_Z(cb + sizeof(RTMEMEXECHDR), PAGE_SIZE);
     void *pv = mmap(NULL, cbAlloc, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS
-# if defined(RT_ARCH_AMD64) && defined(MAP_32BIT)
+#  if defined(RT_ARCH_AMD64) && defined(MAP_32BIT)
                     | MAP_32BIT
-# endif
+#  endif
                     , -1, 0);
     AssertMsgReturn(pv != MAP_FAILED, ("errno=%d cb=%#zx\n", errno, cb), NULL);
@@ -142 +151 @@
     if (pv)
     {
-#ifdef RT_USE_MMAP
+#ifdef RT_USE_MMAP_EXEC
         PRTMEMEXECHDR pHdr = (PRTMEMEXECHDR)pv - 1;
         AssertMsgReturnVoid(RT_ALIGN_P(pHdr, PAGE_SIZE) == pHdr, ("pHdr=%p pv=%p\n", pHdr, pv));
@@ -164 +173 @@
 RTDECL(void *) RTMemPageAlloc(size_t cb) RT_NO_THROW
 {
-#if 0 /** @todo huh? we're using posix_memalign in the next function... */
+#ifdef RT_USE_MMAP_PAGE
+    size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    void *pv = mmap(NULL, cbAligned, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    AssertMsgReturn(pv != MAP_FAILED, ("errno=%d cb=%#zx\n", errno, cb), NULL);
+    return pv;
+
+#else
+# if 0 /** @todo huh? we're using posix_memalign in the next function... */
     void *pv;
     int rc = posix_memalign(&pv, PAGE_SIZE, RT_ALIGN_Z(cb, PAGE_SIZE));
@@ -170 +186 @@
         return pv;
     return NULL;
-#else
+# else
     return memalign(PAGE_SIZE, cb);
+# endif
 #endif
 }
@@ -185 +202 @@
 RTDECL(void *) RTMemPageAllocZ(size_t cb) RT_NO_THROW
 {
+#ifdef RT_USE_MMAP_PAGE
+    size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+    void *pv = mmap(NULL, cbAligned, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    AssertMsgReturn(pv != MAP_FAILED, ("errno=%d cb=%#zx\n", errno, cb), NULL);
+    return pv;
+
+#else
     void *pv;
     int rc = posix_memalign(&pv, PAGE_SIZE, RT_ALIGN_Z(cb, PAGE_SIZE));
@@ -193 +217 @@
     }
     return NULL;
+#endif
 }
@@ -202 +227 @@
  * NULL will be ignored.
  */
-RTDECL(void) RTMemPageFree(void *pv) RT_NO_THROW
+RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW
 {
     if (pv)
+    {
+        Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
+
+#ifdef RT_USE_MMAP_PAGE
+        size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
+        int rc = munmap(pv, cbAligned);
+        AssertMsg(!rc, ("munmap(%p, %#zx) -> %d errno=%d\n", pv, cbAligned, rc, errno)); NOREF(rc);
+#else
         free(pv);
+#endif
+    }
 }
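The optional RT_USE_MMAP_PAGE path above illustrates why a page-granular free needs a size at all: munmap(), unlike free(), must be told how many bytes to unmap. A standalone sketch of that pairing in plain POSIX (no IPRT; the function names are illustrative):

    /* Illustrative sketch -- plain POSIX, not IPRT.  free() does not need a size,
     * but munmap() does, which is what forces the size through the page API. */
    #include <sys/mman.h>
    #include <unistd.h>
    #include <stddef.h>

    static size_t examplePageAlign(size_t cb)
    {
        size_t cbPage = (size_t)sysconf(_SC_PAGESIZE);
        return (cb + cbPage - 1) & ~(cbPage - 1);   /* round up to a page multiple */
    }

    static void *examplePageAlloc(size_t cb)
    {
        void *pv = mmap(NULL, examplePageAlign(cb), PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return pv != MAP_FAILED ? pv : NULL;
    }

    static void examplePageFree(void *pv, size_t cb)
    {
        if (pv)
            munmap(pv, examplePageAlign(cb));       /* the length is mandatory here */
    }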
trunk/src/VBox/Runtime/r3/solaris/alloc-solaris.cpp
(r27492 → r28317)

@@ -133 +133 @@
  * NULL will be ignored.
  */
-RTDECL(void) RTMemPageFree(void *pv) RT_NO_THROW
+RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW
 {
     if (pv)
trunk/src/VBox/Runtime/r3/test.cpp
(r27649 → r28317)

@@ -529 +529 @@
         }
 
-        RTMemPageFree(pMem->pvAlloc);
+        RTMemPageFree(pMem->pvAlloc, pMem->cbAlloc);
     }
     RTMemFree(pMem);
@@ -587 +587 @@
     rc = RTMemProtect(pMem->aGuards[0].pv, pMem->aGuards[0].cb, RTMEM_PROT_WRITE | RTMEM_PROT_READ); AssertRC(rc);
     rc = RTMemProtect(pMem->aGuards[1].pv, pMem->aGuards[1].cb, RTMEM_PROT_WRITE | RTMEM_PROT_READ); AssertRC(rc);
-    RTMemPageFree(pMem->pvAlloc);
+    RTMemPageFree(pMem->pvAlloc, pMem->cbAlloc);
     RTMemFree(pMem);
 }
trunk/src/VBox/Runtime/r3/win/alloc-win.cpp
(r21872 → r28317)

@@ -148 +148 @@
  * NULL will be ignored.
  */
-RTDECL(void) RTMemPageFree(void *pv) RT_NO_THROW
+RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW
 {
     if (pv)
trunk/src/VBox/Runtime/testcase/tstFileAio.cpp
(r25000 → r28317)

@@ -172 +172 @@
     /* Free buffers. */
     for (unsigned i = 0; i < cMaxReqsInFlight; i++)
-        RTMemPageFree(papvBuf[i]);
+        RTMemPageFree(papvBuf[i], cbTestBuf);
 
     /* Free requests. */
trunk/src/VBox/VMM/PDMAsyncCompletionFileCache.cpp
(r28065 → r28317)

@@ -368 +368 @@
               ("Entry is dirty and/or still in progress fFlags=%#x\n", pEntry->fFlags));
 
-    RTMemPageFree(pEntry->pbData);
+    RTMemPageFree(pEntry->pbData, pEntry->cbData);
     RTMemFree(pEntry);
 }
@@ -439 +439 @@
         }
         else if (pCurr->pbData)
-            RTMemPageFree(pCurr->pbData);
+            RTMemPageFree(pCurr->pbData, pEntry->cbData);
 
         pCurr->pbData = NULL;
@@ -1172 +1172 @@
     pdmacFileCacheSub(pCache, pEntry->cbData);
 
-    RTMemPageFree(pEntry->pbData);
+    RTMemPageFree(pEntry->pbData, pEntry->cbData);
     RTMemFree(pEntry);
 
trunk/src/VBox/VMM/PDMAsyncCompletionFileInternal.h
(r28224 → r28317)

@@ -645 +645 @@
     /** Data segment. */
     RTSGSEG      DataSeg;
-    /** Flag whether this segment uses a bounce buffer
-     * because the provided buffer doesn't meet host requirements. */
-    bool         fBounceBuffer;
+    /** When non-zero the segment uses a bounce buffer because the provided buffer
+     * doesn't meet host requirements. */
+    size_t       cbBounceBuffer;
     /** Pointer to the used bounce buffer if any. */
     void        *pvBounceBuffer;
     /** Start offset in the bounce buffer to copy from. */
-    uint32_t     uBounceBufOffset;
+    uint32_t     offBounceBuffer;
     /** Flag whether this is a prefetch request. */
     bool         fPrefetch;
trunk/src/VBox/VMM/PDMAsyncCompletionFileNormal.cpp
(r28225 → r28317)

@@ -432 +432 @@
     pAioMgr->iFreeEntryNext = (pAioMgr->iFreeEntryNext + 1) % pAioMgr->cReqEntries;
 
-    if (pTask->fBounceBuffer)
+    if (pTask->cbBounceBuffer)
         RTMemFree(pTask->pvBounceBuffer);
 
@@ -450 +450 @@
         pAioMgr->cRequestsActiveMax = pAioMgr->cRequestsActive;
     }
 
     LogFlow(("Removed requests. I/O manager has a total of %d active requests now\n", pAioMgr->cRequestsActive));
     LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));
@@ -602 +602 @@
 
     pTask->fPrefetch = false;
-    pTask->fBounceBuffer = false;
+    pTask->cbBounceBuffer = 0;
 
     /*
@@ -721 +721 @@
 
                 /* Create bounce buffer. */
-                pTask->fBounceBuffer = true;
+                pTask->cbBounceBuffer = cbToTransfer;
 
                 AssertMsg(pTask->Off >= offStart, ("Overflow in calculation Off=%llu offStart=%llu\n",
                           pTask->Off, offStart));
-                pTask->uBounceBufOffset = pTask->Off - offStart;
+                pTask->offBounceBuffer = pTask->Off - offStart;
 
                 /** @todo: I think we need something like a RTMemAllocAligned method here.
@@ -755 +755 @@
             }
             else
-                pTask->fBounceBuffer = false;
+                pTask->cbBounceBuffer = 0;
 
             if (RT_SUCCESS(rc))
@@ -786 +786 @@
         {
             /* Cleanup */
-            if (pTask->fBounceBuffer)
-                RTMemPageFree(pTask->pvBounceBuffer);
+            if (pTask->cbBounceBuffer)
+                RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
         }
     }
@@ -1146 +1146 @@
             pEndpoint->AioMgr.cReqsProcessed++;
 
-            if (pTask->fBounceBuffer)
-                RTMemFree(pTask->pvBounceBuffer);
+            if (pTask->cbBounceBuffer)
+                RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
 
             /* Queue the request on the pending list. */
@@ -1187 +1187 @@
     AssertMsg(   RT_FAILURE(rcReq)
              || (   (cbTransfered == pTask->DataSeg.cbSeg)
-                 || (pTask->fBounceBuffer && (cbTransfered >= pTask->DataSeg.cbSeg))),
+                 || (pTask->cbBounceBuffer && cbTransfered >= pTask->DataSeg.cbSeg)),
              ("Task didn't completed successfully (rc=%Rrc) or was incomplete (cbTransfered=%u)\n", rcReq, cbTransfered));
 
@@ -1193 +1193 @@
         {
             Assert(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE);
-            Assert(pTask->fBounceBuffer);
-
-            memcpy(((uint8_t *)pTask->pvBounceBuffer) + pTask->uBounceBufOffset,
+            Assert(pTask->cbBounceBuffer);
+
+            memcpy(((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
                    pTask->DataSeg.pvSeg,
                    pTask->DataSeg.cbSeg);
@@ -1219 +1219 @@
         else
         {
-            if (RT_SUCCESS(rc) && pTask->fBounceBuffer)
+            if (RT_SUCCESS(rc) && pTask->cbBounceBuffer)
             {
                 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                     memcpy(pTask->DataSeg.pvSeg,
-                           ((uint8_t *)pTask->pvBounceBuffer) + pTask->uBounceBufOffset,
-                           pTask->DataSeg.cbSeg);
-
-                RTMemPageFree(pTask->pvBounceBuffer);
+                           ((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
+                           pTask->DataSeg.cbSeg);
+
+                RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
             }
 
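Replacing the fBounceBuffer flag with a cbBounceBuffer byte count serves two purposes in the hunks above: a non-zero size doubles as the "bounce buffer in use" flag, and the completion path now has the value it must pass to RTMemPageFree(). A reduced sketch of that bookkeeping (the struct and functions are simplified stand-ins, not the real PDM types):

    /* Illustrative sketch -- simplified stand-ins, not the real PDM structures. */
    #include <iprt/mem.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct EXAMPLETASK
    {
        void    *pvBounceBuffer;   /* page-aligned staging buffer, or NULL */
        size_t   cbBounceBuffer;   /* 0 = no bounce buffer; else the size to free with */
        size_t   offBounceBuffer;  /* where the caller's data sits inside it */
    } EXAMPLETASK;

    static bool exampleSetupBounce(EXAMPLETASK *pTask, size_t cbToTransfer, size_t offIntoBuffer)
    {
        pTask->pvBounceBuffer = RTMemPageAlloc(cbToTransfer);
        if (!pTask->pvBounceBuffer)
            return false;
        pTask->cbBounceBuffer  = cbToTransfer;   /* doubles as the "in use" flag */
        pTask->offBounceBuffer = offIntoBuffer;
        return true;
    }

    static void exampleCompleteRead(EXAMPLETASK *pTask, void *pvUser, size_t cbUser)
    {
        if (pTask->cbBounceBuffer)               /* non-zero => bounce buffer was used */
        {
            memcpy(pvUser, (uint8_t *)pTask->pvBounceBuffer + pTask->offBounceBuffer, cbUser);
            RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
            pTask->pvBounceBuffer = NULL;
            pTask->cbBounceBuffer = 0;
        }
    }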
trunk/src/VBox/VMM/PDMLdr.cpp
(r28262 → r28317)

@@ -652 +652 @@
     RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
     RTMemFree(pModule);
-    RTMemTmpFree(pszFile);
     LogRel(("pdmR3LoadR0U: pszName=\"%s\" rc=%Rrc\n", pszName, rc));
 
     /* Don't consider VERR_PDM_MODULE_NAME_CLASH and VERR_NO_MEMORY above as these are very unlikely. */
     if (RT_FAILURE(rc) && pUVM->pVM) /** @todo VMR3SetErrorU. */
-        return VMSetError(pUVM->pVM, rc, RT_SRC_POS, N_("Cannot load R0 module %s"), pszFilename);
+        rc = VMSetError(pUVM->pVM, rc, RT_SRC_POS, N_("Cannot load R0 module %s"), pszFilename);
+
+    RTMemTmpFree(pszFile); /* might be reference thru pszFilename in the above VMSetError call. */
     return rc;
 }
trunk/src/VBox/VMM/SSM.cpp
(r26526 → r28317)

@@ -1675 +1675 @@
         pHead = pCur->pNext;
         pCur->pNext = NULL;
-        RTMemPageFree(pCur);
+        RTMemPageFree(pCur, sizeof(*pCur));
     }
 }
@@ -1688 +1688 @@
 static void ssmR3StrmDelete(PSSMSTRM pStrm)
 {
-    RTMemPageFree(pStrm->pCur);
+    RTMemPageFree(pStrm->pCur, sizeof(*pStrm->pCur));
     pStrm->pCur = NULL;
     ssmR3StrmDestroyBufList(pStrm->pHead);
trunk/src/VBox/VMM/VM.cpp
(r28258 → r28317)

@@ -523 +523 @@
         RTTlsFree(pUVM->vm.s.idxTLS);
     }
-    RTMemPageFree(pUVM);
+    RTMemPageFree(pUVM, sizeof(*pUVM));
     return rc;
 }
@@ -2510 +2510 @@
 
     ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
-    RTMemPageFree(pUVM);
+    RTMemPageFree(pUVM, sizeof(*pUVM));
 
     RTLogFlush(NULL);
trunk/src/recompiler/VBoxREMWrapper.cpp
(r27254 → r28317)

@@ -1090 +1090 @@
     { REMPARMDESC_FLAGS_INT, sizeof(size_t), NULL },
     { REMPARMDESC_FLAGS_INT, sizeof(unsigned), NULL }
+};
+static const REMPARMDESC g_aArgsRTMemFree[] =
+{
+    { REMPARMDESC_FLAGS_INT, sizeof(void *), NULL },
+    { REMPARMDESC_FLAGS_INT, sizeof(size_t), NULL }
 };
 static const REMPARMDESC g_aArgsRTStrPrintf[] =
@@ -1327 +1332 @@
     { "RTMemFree",      (void *)(uintptr_t)&RTMemFree,      &g_aArgsPTR[0],           RT_ELEMENTS(g_aArgsPTR),           REMFNDESC_FLAGS_RET_VOID, 0,              NULL },
     { "RTMemPageAlloc", (void *)(uintptr_t)&RTMemPageAlloc, &g_aArgsSIZE_T[0],        RT_ELEMENTS(g_aArgsSIZE_T),        REMFNDESC_FLAGS_RET_INT,  sizeof(void *), NULL },
-    { "RTMemPageFree",  (void *)(uintptr_t)&RTMemPageFree,  &g_aArgsPTR[0],           RT_ELEMENTS(g_aArgsPTR),           REMFNDESC_FLAGS_RET_VOID, 0,              NULL },
+    { "RTMemPageFree",  (void *)(uintptr_t)&RTMemPageFree,  &g_aArgsRTMemProtect[0],  RT_ELEMENTS(g_aArgsRTMemProtect),  REMFNDESC_FLAGS_RET_VOID, 0,              NULL },
     { "RTMemProtect",   (void *)(uintptr_t)&RTMemProtect,   &g_aArgsRTMemProtect[0],  RT_ELEMENTS(g_aArgsRTMemProtect),  REMFNDESC_FLAGS_RET_INT,  sizeof(int),    NULL },
     { "RTStrPrintf",    (void *)(uintptr_t)&RTStrPrintf,    &g_aArgsRTStrPrintf[0],   RT_ELEMENTS(g_aArgsRTStrPrintf),   REMFNDESC_FLAGS_RET_INT | REMFNDESC_FLAGS_ELLIPSIS, sizeof(size_t), NULL },
trunk/src/recompiler/VBoxRecompiler.c
(r27254 → r28317)

@@ -513 +513 @@
     if (RT_FAILURE(rc))
     {
-        RTMemPageFree(phys_ram_dirty);
+        RTMemPageFree(phys_ram_dirty, cbBitmapFull);
         AssertLogRelRCReturn(rc, rc);
     }
trunk/src/recompiler/osdep.h
(r17274 → r28317)

@@ -21 +21 @@
 #endif
 
 #define qemu_vsnprintf(pszBuf, cbBuf, pszFormat, args) \
-                            RTStrPrintfV((pszBuf), (cbBuf), (pszFormat), (args))
+    RTStrPrintfV((pszBuf), (cbBuf), (pszFormat), (args))
 #define qemu_vprintf(pszFormat, args) \
-                            RTLogPrintfV((pszFormat), (args))
-#define qemu_printf         RTLogPrintf
-#define qemu_malloc(cb)     RTMemAlloc(cb)
-#define qemu_mallocz(cb)    RTMemAllocZ(cb)
-#define qemu_realloc(ptr, cb) RTMemRealloc(ptr, cb)
+    RTLogPrintfV((pszFormat), (args))
+#define qemu_printf             RTLogPrintf
+#define qemu_malloc(cb)         RTMemAlloc(cb)
+#define qemu_mallocz(cb)        RTMemAllocZ(cb)
+#define qemu_realloc(ptr, cb)   RTMemRealloc(ptr, cb)
 
-#define qemu_free(pv)       RTMemFree(pv)
-#define qemu_strdup(psz)    RTStrDup(psz)
+#define qemu_free(pv)           RTMemFree(pv)
+#define qemu_strdup(psz)        RTStrDup(psz)
 
-#define qemu_vmalloc(cb)    RTMemPageAlloc(cb)
-#define qemu_vfree(pv)      RTMemPageFree(pv)
+#define qemu_vmalloc(cb)        RTMemPageAlloc(cb)
+#define qemu_vfree(pv)          RTMemPageFree(pv, ???)
 
 #ifndef NULL