Changeset 4135 in vbox
- Timestamp: Aug 14, 2007 1:29:43 AM (17 years ago)
- Location: trunk
- Files: 7 edited, 1 copied
trunk/include/iprt/memobj.h
r4071 → r4135:
- RTR0MemObjGetPagePhysAddr() now takes the page index as size_t instead of unsigned (a usage sketch follows below):
      RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage);
- The @remark on RTR0MemObjLockUser() now states that RTR0MemGetAddressR3() and RTR0MemGetAddress() will return the rounded down address (it previously referred to RTR0MemObjGetAddress()).
- The @remark on RTR0MemObjLockKernel() likewise now refers to RTR0MemGetAddress().
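As a usage sketch of the updated signature, assuming the wider IPRT ring-0 API for allocation, size query and freeing (RTR0MemObjAllocPage(), RTR0MemObjSize(), RTR0MemObjFree() and the logging are not part of this diff):

      #include <iprt/memobj.h>
      #include <iprt/param.h>
      #include <iprt/err.h>
      #include <iprt/log.h>

      /* Walk the pages of a freshly allocated object with a size_t page index. */
      static int dumpPagePhysAddrs(size_t cb)
      {
          RTR0MEMOBJ hMemObj;
          int rc = RTR0MemObjAllocPage(&hMemObj, cb, false /* fExecutable */);
          if (RT_SUCCESS(rc))
          {
              size_t cPages = RTR0MemObjSize(hMemObj) >> PAGE_SHIFT;
              for (size_t iPage = 0; iPage < cPages; iPage++)
                  Log(("page %u: %RHp\n", (unsigned)iPage, RTR0MemObjGetPagePhysAddr(hMemObj, iPage)));
              rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
          }
          return rc;
      }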
trunk/src/VBox/Runtime/Makefile.kmk
r4071 → r4135:
- Whitespace-only changes (trailing blanks) in the copyright header and around the "PORTME" note for the Solaris Ring-0 driver sources.
- The new NT memory object sources are added to the Windows Ring-0 driver build, still commented out for now:
      #RuntimeR0Drv_SOURCES.win += \
      #        r0drv/memobj-r0drv.cpp \
      #        r0drv/nt/memobj-r0drv-nt.cpp
trunk/src/VBox/Runtime/include/internal/memobj.h
r4071 → r4135:
- The doc comment for the memory address field now spells out which variant gets which mapping: "For PAGE, CONT, LOW, RES_VIRT/R0, LOCK/R0 and MAP/R0 it's the ring-0 mapping. For LOCK/R3, RES_VIRT/R3 and MAP/R3 it is the ring-3 mapping. For PHYS this might actually be NULL if there isn't any mapping." (RES_VIRT was previously unqualified and RES_VIRT/R3 was missing from the ring-3 list.)
- rtR0MemObjNativeGetPagePhysAddr() now takes the page index as size_t, matching the public API:
      RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage);
trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp
r4071 → r4135:
- Whitespace-only changes in the comment in rtR0MemObjNativeAllocLow() explaining that allocating the 128KB low page pool with a contiguous allocation can exhaust the kernel and frequently locks up the box on startup, so the code first tries IOMallocAligned and only falls back on the contiguous path (via rtR0MemObjNativeAllocCont) if the memory ends up high in physical memory (sketched below).
- rtR0MemObjNativeGetPagePhysAddr() now takes the page index as size_t.
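The fallback strategy that comment describes boils down to something like the sketch below. It is only illustrative: the rtR0MemObjDarwinIsLow() predicate (checking that every page ended up below 4GB) is hypothetical, and the committed code falls back through rtR0MemObjNativeAllocCont() rather than calling IOMallocContiguous() inline.

      #include <stdbool.h>
      #include <IOKit/IOLib.h>

      /* Hypothetical predicate: true if every page of the block lies below 4GB. */
      extern bool rtR0MemObjDarwinIsLow(void *pv, size_t cb);

      static void *allocLowSketch(size_t cb)
      {
          /* Try the cheap aligned allocation first... */
          void *pv = IOMallocAligned(cb, PAGE_SIZE);
          if (pv)
          {
              if (rtR0MemObjDarwinIsLow(pv, cb))
                  return pv;                  /* got lucky, it's already low memory */
              IOFreeAligned(pv, cb);          /* physically too high, give it back */
          }

          /* ...and only then fall back on the contiguous allocator. */
          IOPhysicalAddress PhysAddr = 0;
          return IOMallocContiguous(cb, PAGE_SIZE, &PhysAddr);
      }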
trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
r4049 → r4135:
- Whitespace-only cleanups throughout the file: the RTR0MEMOBJFREEBSD structure comments, the vm_map_remove()/vm_map_wire() argument lists in rtR0MemObjNativeFree(), and the comments in the allocation, locking and reservation workers. The affected comments still describe the existing design: page allocation follows the kld/link_elf approach of vm_object_allocate + vm_map_find + vm_map_wire (see http://fxr.watson.org/fxr/source/kern/link_elf.c?v=RELENG62#L701), user pages are wired with vm_map_wire() rather than vslock() to avoid resource usage restrictions (sketched below), the physical allocation path is still unimplemented with pmap_mapdev noted as the model (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860), and the mapping code still carries the read/write/execute comment inherited from the OS/2 port.
- rtR0MemObjNativeGetPagePhysAddr() now takes the page index as size_t:
      RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
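The vslock() versus vm_map_wire() choice mentioned above amounts to the pattern below. This is only an illustrative sketch against FreeBSD 6.x kernel headers, not the committed code; error mapping and header ordering are simplified.

      #include <sys/param.h>
      #include <sys/proc.h>
      #include <vm/vm.h>
      #include <vm/vm_map.h>
      #include <vm/vm_extern.h>

      /*
       * Wire a user range directly with vm_map_wire() so the locking is not
       * subject to the per-process resource limits that vslock() enforces.
       * Returns the raw KERN_* status (0 on success).
       */
      static int lockUserRangeSketch(struct proc *pProc, void *pv, size_t cb)
      {
          vm_map_t pMap = &pProc->p_vmspace->vm_map;
          return vm_map_wire(pMap,
                             (vm_offset_t)pv,
                             (vm_offset_t)pv + cb,
                             VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
      }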
trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp
r4071 → r4135:
- rtR0MemObjNew() now asserts that cbSelf fits in 32 bits and casts it when storing it:
      Assert(cbSelf == (uint32_t)cbSelf);
      ...
      pNew->cbSelf = (uint32_t)cbSelf;
- New API: RTR0MemObjAddressR3() returns the ring-3 address of a ring-0 memory object. It only applies to objects with ring-3 mappings (locked user memory, reserved user address space and user mappings) and returns NIL_RTR3PTR otherwise; strict builds assert on invalid handles and on objects without a ring-3 mapping (usage sketch below):
      RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
      {
          /* Validate the object handle. */
          AssertPtrReturn(MemObj, NIL_RTR3PTR);
          PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)MemObj;
          AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
          AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
          AssertMsgReturn(   (   pMem->enmType == RTR0MEMOBJTYPE_MAPPING
                              && pMem->u.Mapping.R0Process != NIL_RTR0PROCESS)
                          || (   pMem->enmType == RTR0MEMOBJTYPE_LOCK
                              && pMem->u.Lock.R0Process != NIL_RTR0PROCESS)
                          || (   pMem->enmType == RTR0MEMOBJTYPE_RES_VIRT
                              && pMem->u.ResVirt.R0Process != NIL_RTR0PROCESS),
                          ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);

          /* return the mapping address. */
          return (RTR3PTR)pMem->pv;
      }
- RTR0MemObjGetPagePhysAddr() now takes the page index as size_t and computes the page count as size_t as well.
- The @remark notes on RTR0MemObjLockUser() and RTR0MemObjLockKernel() are updated to match the public header (RTR0MemGetAddressR3()/RTR0MemGetAddress()).
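A usage sketch for the new API, assuming the surrounding IPRT ring-0 environment; RTR0MemObjLockUser(), RTR0MemObjSize() and RTR0MemObjFree() come from the existing API, and the logging is illustrative only.

      #include <iprt/memobj.h>
      #include <iprt/err.h>
      #include <iprt/log.h>

      /* Lock a user buffer in the current process and query its (rounded down) ring-3 address. */
      static int lockAndQueryR3(void *pvR3, size_t cb)
      {
          RTR0MEMOBJ hMemObj;
          int rc = RTR0MemObjLockUser(&hMemObj, pvR3, cb, NIL_RTR0PROCESS /* alias for the current process */);
          if (RT_SUCCESS(rc))
          {
              RTR3PTR R3Ptr = RTR0MemObjAddressR3(hMemObj);
              Log(("locked %u bytes at %RRv\n", (unsigned)RTR0MemObjSize(hMemObj), R3Ptr));
              rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
          }
          return rc;
      }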
trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp
r4134 → r4135 (the file starts life as a copy of the Darwin implementation; this change begins the NT conversion):
- File header comment: "Ring-0 Memory Objects, Darwin" becomes "Ring-0 Memory Objects, NT", and the include of "the-darwin-kernel.h" is replaced by "the-nt-kernel.h".
- The USE_VM_MAP_WIRE define is dropped and a "Defined Constants And Macros" section is added with MAX_LOCK_MEM_SIZE, the maximum number of bytes to lock down in one go: 32MB on x86 and 24MB on AMD64. The limit is supposed to sit just below 256MB but appears to be much lower in practice; the values were determined experimentally.
- The per-platform object goes from RTR0MEMOBJDARWIN (core plus IOMemoryDescriptor/IOMemoryMap pointers) to RTR0MEMOBJNT, which holds the core structure, the number of MDLs (memory descriptor lists) and a variable size array of MDL pointers (a chunked-locking sketch follows below):
      typedef struct RTR0MEMOBJNT
      {
          /** The core structure. */
          RTR0MEMOBJINTERNAL Core;
          /** The number of PMDLs (memory descriptor lists) in the array. */
          unsigned cMdls;
          /** Array of MDL pointers. (variable size) */
          PMDL apMdls[1];
      } RTR0MEMOBJNT, *PRTR0MEMOBJNT;
- rtR0MemObjNativeFree() loses the IOMemoryDescriptor/IOMemoryMap release logic: LOW/PAGE/CONT objects no longer call IOFreeAligned/IOFreeContiguous, LOCK objects call MmUnlockPages() on each MDL instead of vm_map_unwire(), PHYS objects only assert !fAllocated, and at the end every MDL is unlocked and freed with MmUnlockPages()/IoFreeMdl(). (The conversion is work in progress; the new sources are still commented out in Makefile.kmk, see above.)
- The remaining functions (the page/low/cont/phys allocators, the lock and map workers, and rtR0MemObjNativeGetPagePhysAddr(), which also switches to a size_t page index) are renamed from pMemDarwin/PRTR0MEMOBJDARWIN to pMemNt/PRTR0MEMOBJNT, but their bodies are still the Darwin/IOKit code and remain to be rewritten for NT.
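The new cMdls/apMdls layout together with MAX_LOCK_MEM_SIZE points at locking large regions in several chunks, one MDL per chunk. The sketch below shows what such a loop could look like using the standard NT primitives (IoAllocateMdl, MmProbeAndLockPages, MmUnlockPages, IoFreeMdl); it is an assumption about the eventual implementation, not code from this changeset. Locking user memory would pass UserMode instead of KernelMode and run in the context of the target process.

      #include <ntddk.h>

      #define MAX_LOCK_MEM_SIZE (32*1024*1024) /* x86 value from the changeset */

      /*
       * Hypothetical sketch: lock cb bytes starting at pv in chunks of at most
       * MAX_LOCK_MEM_SIZE each, producing one MDL per chunk in papMdls.
       */
      static NTSTATUS LockInChunksSketch(PVOID pv, SIZE_T cb, PMDL *papMdls, ULONG cMdlsMax, ULONG *pcMdls)
      {
          NTSTATUS rcNt = STATUS_SUCCESS;
          ULONG    cMdls = 0;

          while (cb > 0 && NT_SUCCESS(rcNt))
          {
              SIZE_T cbChunk = cb > MAX_LOCK_MEM_SIZE ? MAX_LOCK_MEM_SIZE : cb;
              if (cMdls >= cMdlsMax)
              {
                  rcNt = STATUS_INSUFFICIENT_RESOURCES;
                  break;
              }

              PMDL pMdl = IoAllocateMdl(pv, (ULONG)cbChunk, FALSE, FALSE, NULL);
              if (!pMdl)
              {
                  rcNt = STATUS_INSUFFICIENT_RESOURCES;
                  break;
              }

              __try
              {
                  /* MmProbeAndLockPages raises an exception on failure. */
                  MmProbeAndLockPages(pMdl, KernelMode, IoModify);
                  papMdls[cMdls++] = pMdl;
                  pv = (PUCHAR)pv + cbChunk;
                  cb -= cbChunk;
              }
              __except (EXCEPTION_EXECUTE_HANDLER)
              {
                  IoFreeMdl(pMdl);
                  rcNt = STATUS_ACCESS_VIOLATION;
              }
          }

          if (NT_SUCCESS(rcNt))
              *pcMdls = cMdls;
          else
              while (cMdls-- > 0)              /* undo whatever we already locked */
              {
                  MmUnlockPages(papMdls[cMdls]);
                  IoFreeMdl(papMdls[cMdls]);
              }
          return rcNt;
      }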
trunk/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp
r3236 → r4135:
- Whitespace-only changes in rtR0MemObjNativeFree() and in the comment explaining that all OS/2 mappings are read/write/execute with no cache options, so a dummy mapping object can simply share the pages and the main memory object isn't actually freed until all mappings are gone.
- rtR0MemObjNativeGetPagePhysAddr() now takes the page index as size_t:
      RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)