Timestamp: Jan 21, 2007 9:41:29 PM
Location:  trunk
Files:     6 edited
trunk/include/iprt/err.h
(r10 → r217)

 /** The operation was cancelled by the user. */
 #define VERR_CANCELLED                  (-70)
+/** Failed to initialize a memory object.
+ * Exactly what this means is OS specific. */
+#define VERR_MEMOBJ_INIT_FAILED         (-71)
+/** Out of memory condition when allocating memory with low physical backing. */
+#define VERR_NO_LOW_MEMORY              (-72)
+/** Out of memory condition when allocating physical memory (without mapping). */
+#define VERR_NO_PHYS_MEMORY             (-73)
+/** The address (virtual or physical) is too big. */
+#define VERR_ADDRESS_TOO_BIG            (-74)
+/** Failed to map a memory object. */
+#define VERR_MAP_FAILED                 (-75)
 /** @} */
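The new status codes let ring-0 callers tell host-specific memory-object failures apart from plain out-of-memory conditions. A minimal sketch of such a caller, assuming the public RTR0MemObj API from iprt/memobj.h; the helper name is made up and not part of the changeset:

#include <iprt/memobj.h>
#include <iprt/err.h>

/* Illustrative caller only. */
static int thisExampleAllocDmaBuffer(size_t cb, RTR0MEMOBJ *phMemObj)
{
    int rc = RTR0MemObjAllocCont(phMemObj, cb, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* The new codes separate host-specific memory-object trouble from plain exhaustion. */
    if (   rc == VERR_MEMOBJ_INIT_FAILED
        || rc == VERR_MAP_FAILED
        || rc == VERR_ADDRESS_TOO_BIG)
        return rc;                  /* report the host-specific failure as-is */
    return VERR_NO_CONT_MEMORY;     /* treat everything else as out of memory */
}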
trunk/src/VBox/Runtime/include/internal/memobj.h
(r207 → r217)

     } u;
 
-
 } RTR0MEMOBJINTERNAL;
trunk/src/VBox/Runtime/r0drv/alloc-r0drv.cpp
(r1 → r217)

 RTDECL(void) RTMemFree(void *pv)
 {
-    PRTMEMHDR pHdr = (PRTMEMHDR)pv - 1;
+    PRTMEMHDR pHdr;
+    if (!pv)
+        return;
+    pHdr = (PRTMEMHDR)pv - 1;
     if (pHdr->u32Magic == RTMEMHDR_MAGIC)
     {
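With the added check, RTMemFree now tolerates a NULL pointer the same way the C library's free() does. A hedged sketch of the kind of cleanup code this simplifies; MYSTATE and its members are made up for illustration:

#include <iprt/mem.h>

/* Illustrative only. */
typedef struct MYSTATE
{
    void *pvScratch;    /* may never have been allocated */
    void *pvTable;      /* may never have been allocated */
} MYSTATE;

static void thisExampleDestroy(MYSTATE *pState)
{
    if (!pState)
        return;
    /* No per-pointer NULL checks needed now that RTMemFree(NULL) is a no-op. */
    RTMemFree(pState->pvScratch);
    RTMemFree(pState->pvTable);
    RTMemFree(pState);
}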
trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp
(r207 → r217)

+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+/**
+ * The Darwin version of the memory object structure.
+ */
+typedef struct RTR0MEMOBJDARWIN
+{
+    /** The core structure. */
+    RTR0MEMOBJINTERNAL  Core;
+    /** Pointer to the memory descriptor created for allocated and locked memory. */
+    IOMemoryDescriptor *pMemDesc;
+    /** Pointer to the memory mapping object for mapped memory. */
+    IOMemoryMap        *pMemMap;
+} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
+
 
 int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
 {
+    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
+
+    /*
+     * Release the IOMemoryDescriptor/IOMemoryMap associated with the object.
+     */
+    if (pMemDarwin->pMemDesc)
+    {
+        pMemDarwin->pMemDesc->release();
+        pMemDarwin->pMemDesc = NULL;
+        Assert(!pMemDarwin->pMemMap);
+    }
+    else if (pMemDarwin->pMemMap)
+    {
+        pMemDarwin->pMemMap->release();
+        pMemDarwin->pMemMap = NULL;
+    }
+
+    /*
+     * Release any memory that we've allocated or locked.
+     */
+    switch (pMemDarwin->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_PAGE:
+            IOFreeAligned(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
+            break;
+
+        /*case RTR0MEMOBJTYPE_LOW: => RTR0MEMOBJTYPE_CONT
+            break;*/
+
+        case RTR0MEMOBJTYPE_CONT:
+            IOFreeContiguous(pMemDarwin->Core.pv, pMemDarwin->Core.cb);
+            break;
+
+        case RTR0MEMOBJTYPE_LOCK:
+            AssertMsgFailed(("RTR0MEMOBJTYPE_LOCK\n"));
+            return VERR_INTERNAL_ERROR;
+            break;
+
+        case RTR0MEMOBJTYPE_PHYS:
+            /*if (pMemDarwin->Core.u.Phys.fAllocated)
+                IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
+            Assert(!pMemDarwin->Core.u.Phys.fAllocated);
+            break;
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
+            return VERR_INTERNAL_ERROR;
+            break;
+
+        case RTR0MEMOBJTYPE_MAPPING:
+            /* nothing to do here. */
+            break;
+
+        default:
+            AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
+            return VERR_INTERNAL_ERROR;
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    /*
+     * Try to allocate the memory and create its IOMemoryDescriptor first.
+     */
+    int rc = VERR_NO_PAGE_MEMORY;
+    AssertCompile(sizeof(IOPhysicalAddress) == 4);
+    void *pv = IOMallocAligned(cb, PAGE_SIZE);
+    if (pv)
+    {
+        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
+        if (pMemDesc)
+        {
+            /*
+             * Create the IPRT memory object.
+             */
+            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PAGE, pv, cb);
+            if (pMemDarwin)
+            {
+                pMemDarwin->pMemDesc = pMemDesc;
+                *ppMem = &pMemDarwin->Core;
+                return VINF_SUCCESS;
+            }
+
+            rc = VERR_NO_MEMORY;
+            pMemDesc->release();
+        }
+        else
+            rc = VERR_MEMOBJ_INIT_FAILED;
+        IOFreeAligned(pv, cb);
+    }
+    return rc;
+}
+
+
+int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    /*
+     * IOMallocContiguous is the most suitable API.
+     */
+    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
+}
+
+
+int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+{
+    /*
+     * Try to allocate the memory and create its IOMemoryDescriptor first.
+     */
+    int rc = VERR_NO_CONT_MEMORY;
+    AssertCompile(sizeof(IOPhysicalAddress) == 4);
+    void *pv = IOMallocContiguous(cb, PAGE_SIZE, NULL);
+    if (pv)
+    {
+        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddress((vm_address_t)pv, cb, kIODirectionInOut, kernel_task);
+        if (pMemDesc)
+        {
+            /* a bit of useful paranoia. */
+            addr64_t PhysAddr = pMemDesc->getPhysicalSegment64(0, NULL);
+            Assert(PhysAddr == pMemDesc->getPhysicalAddress());
+            if (    PhysAddr > 0
+                &&  PhysAddr <= _4G
+                &&  PhysAddr + cb <= _4G)
+            {
+                /*
+                 * Create the IPRT memory object.
+                 */
+                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_CONT, pv, cb);
+                if (pMemDarwin)
+                {
+                    pMemDarwin->Core.u.Cont.Phys = PhysAddr;
+                    pMemDarwin->pMemDesc = pMemDesc;
+                    *ppMem = &pMemDarwin->Core;
+                    return VINF_SUCCESS;
+                }
+
+                rc = VERR_NO_MEMORY;
+            }
+            else
+            {
+                AssertMsgFailed(("PhysAddr=%llx\n", (unsigned long long)PhysAddr));
+                rc = VERR_INTERNAL_ERROR;
+            }
+            pMemDesc->release();
+        }
+        else
+            rc = VERR_MEMOBJ_INIT_FAILED;
+        IOFreeContiguous(pv, cb);
+    }
+    return rc;
+}
+
+
+int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
+{
+#if 0 /* turned out IOMallocPhysical isn't exported yet. sigh. */
+    /*
+     * Try to allocate the memory and create its IOMemoryDescriptor first.
+     * Note that IOMallocPhysical is not working correctly (it's ignoring the mask).
+     */
+
+    /* first calc the mask (in the hope that it'll be used) */
+    IOPhysicalAddress PhysMask = ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
+    if (PhysHighest != NIL_RTHCPHYS)
+    {
+        PhysMask = ~(IOPhysicalAddress)0;
+        while (PhysMask > PhysHighest)
+            PhysMask >>= 1;
+        AssertReturn(PhysMask + 1 < cb, VERR_INVALID_PARAMETER);
+        PhysMask &= ~(IOPhysicalAddress)PAGE_OFFSET_MASK;
+    }
+
+    /* try to allocate physical memory. */
+    int rc = VERR_NO_PHYS_MEMORY;
+    mach_vm_address_t PhysAddr64 = IOMallocPhysical(cb, PhysMask);
+    if (PhysAddr64)
+    {
+        IOPhysicalAddress PhysAddr = PhysAddr64;
+        if (    PhysAddr == PhysAddr64
+            &&  PhysAddr < PhysHighest
+            &&  PhysAddr + cb <= PhysHighest)
+        {
+            /* create a descriptor. */
+            IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
+            if (pMemDesc)
+            {
+                Assert(PhysAddr == pMemDesc->getPhysicalAddress());
+
+                /*
+                 * Create the IPRT memory object.
+                 */
+                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+                if (pMemDarwin)
+                {
+                    pMemDarwin->Core.u.Phys.PhysBase = PhysAddr;
+                    pMemDarwin->Core.u.Phys.fAllocated = true;
+                    pMemDarwin->pMemDesc = pMemDesc;
+                    *ppMem = &pMemDarwin->Core;
+                    return VINF_SUCCESS;
+                }
+
+                rc = VERR_NO_MEMORY;
+                pMemDesc->release();
+            }
+            else
+                rc = VERR_MEMOBJ_INIT_FAILED;
+        }
+        else
+        {
+            AssertMsgFailed(("PhysAddr=%#llx PhysAddr64=%#llx PhysHighest=%#llx\n", (unsigned long long)PhysAddr,
+                             (unsigned long long)PhysAddr64, (unsigned long long)PhysHighest));
+            rc = VERR_INTERNAL_ERROR;
+        }
+
+        IOFreePhysical(PhysAddr64, cb);
+    }
+
+    /*
+     * Just in case IOMallocPhysical doesn't work right, we can try to fall back
+     * on a contiguous allocation.
+     */
+    if (rc == VERR_INTERNAL_ERROR || rc == VERR_NO_PHYS_MEMORY)
+    {
+        int rc2 = rtR0MemObjNativeAllocCont(ppMem, cb, false);
+        if (RT_SUCCESS(rc2))
+            rc = rc2;
+    }
+
+    return rc;
+
+#else
+
+    return rtR0MemObjNativeAllocCont(ppMem, cb, false);
+#endif
+}
+
+
+int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
+{
+    /*
+     * Validate the address range and create a descriptor for it.
+     */
+    int rc = VERR_ADDRESS_TOO_BIG;
+    IOPhysicalAddress PhysAddr = Phys;
+    if (PhysAddr == Phys)
+    {
+        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withPhysicalAddress(PhysAddr, cb, kIODirectionInOut);
+        if (pMemDesc)
+        {
+            Assert(PhysAddr == pMemDesc->getPhysicalAddress());
+
+            /*
+             * Create the IPRT memory object.
+             */
+            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+            if (pMemDarwin)
+            {
+                pMemDarwin->Core.u.Phys.PhysBase = PhysAddr;
+                pMemDarwin->Core.u.Phys.fAllocated = false;
+                pMemDarwin->pMemDesc = pMemDesc;
+                *ppMem = &pMemDarwin->Core;
+                return VINF_SUCCESS;
+            }
+
+            rc = VERR_NO_MEMORY;
+            pMemDesc->release();
+        }
+    }
+    else
+        AssertMsgFailed(("%#llx\n", (unsigned long long)Phys));
+    return rc;
+}
+
+
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
+{
     return VERR_NOT_IMPLEMENTED;
 }
 
 
-int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
 {
     return VERR_NOT_IMPLEMENTED;
 }
 
 
-int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
 {
     return VERR_NOT_IMPLEMENTED;
 }
 
 
-int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
+int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
 {
     return VERR_NOT_IMPLEMENTED;
 }
 
 
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
-{
-    return VERR_NOT_IMPLEMENTED;
-}
-
-
-int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
-{
-    return VERR_NOT_IMPLEMENTED;
-}
-
-
-int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
-{
-    return VERR_NOT_IMPLEMENTED;
-}
-
-
-int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
-{
-    return VERR_NOT_IMPLEMENTED;
-}
-
-
-int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
-{
-    return VERR_NOT_IMPLEMENTED;
-}
-
-
-int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
-{
-    return VERR_NOT_IMPLEMENTED;
-}
-
-
 int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
 {
-    return VERR_NOT_IMPLEMENTED;
+    /*
+     * Must have a memory descriptor.
+     */
+    int rc = VERR_INVALID_PARAMETER;
+    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
+    if (pMemToMapDarwin->pMemDesc)
+    {
+        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, kIOMapAnywhere,
+                                                              kIOMapAnywhere | kIOMapDefaultCache);
+        if (pMemMap)
+        {
+            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
+            void *pv = (void *)(uintptr_t)VirtAddr;
+            if ((uintptr_t)pv == VirtAddr)
+            {
+                /*
+                 * Create the IPRT memory object.
+                 */
+                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
+                                                                                pv, pMemToMapDarwin->Core.cb);
+                if (pMemDarwin)
+                {
+                    pMemDarwin->Core.u.Mapping.Process = NIL_RTPROCESS;
+                    pMemDarwin->pMemMap = pMemMap;
+                    *ppMem = &pMemDarwin->Core;
+                    return VINF_SUCCESS;
+                }
+
+                rc = VERR_NO_MEMORY;
+            }
+            else
+                rc = VERR_ADDRESS_TOO_BIG;
+            pMemMap->release();
+        }
+        else
+            rc = VERR_MAP_FAILED;
+    }
+    return rc;
 }
 
 
 int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
 {
-    return VERR_NOT_IMPLEMENTED;
-}
-
-
-RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(RTR0MEMOBJ pMem, unsigned iPage)
-{
-    return NIL_RTHCPHYS;
-}
-
+    /*
+     * Must have a memory descriptor.
+     */
+    int rc = VERR_INVALID_PARAMETER;
+    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
+    if (pMemToMapDarwin->pMemDesc)
+    {
+        Assert(current_task() != kernel_task);
+        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(current_task(), kIOMapAnywhere,
+                                                              kIOMapAnywhere | kIOMapDefaultCache);
+        if (pMemMap)
+        {
+            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
+            void *pv = (void *)(uintptr_t)VirtAddr;
+            if ((uintptr_t)pv == VirtAddr)
+            {
+                /*
+                 * Create the IPRT memory object.
+                 */
+                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
+                                                                                pv, pMemToMapDarwin->Core.cb);
+                if (pMemDarwin)
+                {
+                    pMemDarwin->Core.u.Mapping.Process = /*RTProcSelf()*/(RTPROCESS)current_task();
+                    pMemDarwin->pMemMap = pMemMap;
+                    *ppMem = &pMemDarwin->Core;
+                    return VINF_SUCCESS;
+                }
+
+                rc = VERR_NO_MEMORY;
+            }
+            else
+                rc = VERR_ADDRESS_TOO_BIG;
+            pMemMap->release();
+        }
+        else
+            rc = VERR_MAP_FAILED;
+    }
+    return rc;
+}
+
+
+RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, unsigned iPage)
+{
+    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
+
+    /*
+     * Get the memory descriptor.
+     */
+    IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
+    if (!pMemDesc)
+        pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
+    AssertReturn(pMemDesc, NIL_RTHCPHYS);
+
+    /*
+     * If we've got a memory descriptor, use getPhysicalSegment64().
+     */
+    addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
+    AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
+    RTHCPHYS PhysAddr = Addr;
+    AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%VHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
+    return PhysAddr;
+}
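For orientation, a sketch of how the mapping and physical-address paths implemented above are reached through the generic IPRT API; the RTR0MemObj* calls are assumed from iprt/memobj.h and everything else here is illustrative, not part of the changeset:

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/assert.h>
#include <iprt/param.h>
#include <iprt/err.h>

/* Illustrative only: map an existing ring-0 memory object into the kernel
   and look up the physical address of each page. */
static int thisExampleMapAndWalk(RTR0MEMOBJ hMemObj)
{
    RTR0MEMOBJ hMapObj;
    int rc = RTR0MemObjMapKernel(&hMapObj, hMemObj, (void *)-1 /*pvFixed: anywhere*/,
                                 0 /*uAlignment*/, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_FAILURE(rc))
        return rc;  /* e.g. VERR_MAP_FAILED or VERR_ADDRESS_TOO_BIG from the code above */

    unsigned cPages = (unsigned)(RTR0MemObjSize(hMapObj) >> PAGE_SHIFT);
    for (unsigned iPage = 0; iPage < cPages; iPage++)
    {
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMapObj, iPage);
        Assert(Phys != NIL_RTHCPHYS);
        /* ... hand Phys to the device or to the page tables ... */
        NOREF(Phys);
    }

    return RTR0MemObjFree(hMapObj, false /*fFreeMappings*/);
}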
trunk/src/VBox/Runtime/r0drv/darwin/the-darwin-kernel.h
(r1 → r217)

 #include <IOKit/IOTypes.h>
 #include <IOKit/IOLib.h>
+#include <IOKit/IOMemoryDescriptor.h>
+#include <IOKit/IOMapper.h>
trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp
(r207 → r217)

             return VERR_NO_MEMORY;
         pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
+        pParent->uRel.Parent.cMappingsAllocated = i + 32;
         Assert(i == pParent->uRel.Parent.cMappings);
     }
…
     /* do the linking. */
     pParent->uRel.Parent.papMappings[i] = pChild;
+    pParent->uRel.Parent.cMappings++;
     pChild->uRel.Child.pParent = pParent;
…
     }
 
-    /* return the size. */
+    /*
+     * We know the address of physically contiguous allocations and mappings.
+     */
+    if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
+        return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
+    if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
+        return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;
+
+    /*
+     * Do the job.
+     */
     return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
 }
…
     AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
     AssertFatal(!rtR0MemObjIsMapping(pParent));
+    AssertFatal(pParent->uRel.Parent.cMappings > 0);
+    AssertPtr(pParent->uRel.Parent.papMappings);
 
     /* locate and remove from the array of mappings. */
…
     const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
     AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
+    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
 
     /* do the allocation. */
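The cMappings/cMappingsAllocated bookkeeping completed above is the usual grow-in-chunks array pattern; here is a self-contained sketch of that pattern with made-up names, not the IPRT internals themselves:

#include <stdlib.h>

/* Illustrative grow-by-32 array, mirroring the papMappings bookkeeping:
   cAllocated tracks the capacity, cUsed tracks the linked children. */
typedef struct EXAMPLEARRAY
{
    void   **papItems;
    size_t   cUsed;
    size_t   cAllocated;
} EXAMPLEARRAY;

static int exampleArrayAppend(EXAMPLEARRAY *pArray, void *pvItem)
{
    if (pArray->cUsed >= pArray->cAllocated)
    {
        size_t cNew = pArray->cAllocated + 32;      /* grow in chunks of 32 */
        void **papNew = (void **)realloc(pArray->papItems, cNew * sizeof(void *));
        if (!papNew)
            return -1;                              /* VERR_NO_MEMORY in IPRT terms */
        pArray->papItems   = papNew;
        pArray->cAllocated = cNew;                  /* the counterpart of cMappingsAllocated */
    }
    pArray->papItems[pArray->cUsed++] = pvItem;     /* link and bump the use count */
    return 0;
}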