Changeset 27146 in vbox for trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
- Timestamp: Mar 7, 2010 4:55:06 PM
- File: 1 edited
trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
--- memobj-r0drv-freebsd.c (r26899)
+++ memobj-r0drv-freebsd.c (r27146)

@@ -44,4 +44,14 @@
 #include "internal/memobj.h"
 
+/**
+ * Our pmap_enter version
+ */
+#if __FreeBSD_version >= 701105
+# define MY_PMAP_ENTER(pPhysMap, AddrR3, pPage, fProt, fWired) \
+    pmap_enter(pPhysMap, AddrR3, VM_PROT_NONE, pPage, fProt, fWired)
+#else
+# define MY_PMAP_ENTER(pPhysMap, AddrR3, pPage, fProt, fWired) \
+    pmap_enter(pPhysMap, AddrR3, pPage, fProt, fWired)
+#endif
 
 /*******************************************************************************

@@ -82,4 +92,42 @@
 *******************************************************************************/
 
+/**
+ * Gets the virtual memory map the specified object is mapped into.
+ *
+ * @returns VM map handle on success, NULL if no map.
+ * @param   pMem    The memory object.
+ */
+static vm_map_t rtR0MemObjFreeBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
+{
+    switch (pMem->enmType)
+    {
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+            return kernel_map;
+
+        case RTR0MEMOBJTYPE_PHYS:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+            return NULL; /* pretend these have no mapping atm. */
+
+        case RTR0MEMOBJTYPE_LOCK:
+            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
+                 ? kernel_map
+                 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;
+
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
+                 ? kernel_map
+                 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;
+
+        case RTR0MEMOBJTYPE_MAPPING:
+            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
+                 ? kernel_map
+                 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;
+
+        default:
+            return NULL;
+    }
+}
 
 int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)

@@ -95,38 +143,32 @@
 
         case RTR0MEMOBJTYPE_PAGE:
-            if (pMemFreeBSD->u.NonPhys.pObject)
-            {
-                rc = vm_map_remove(kernel_map,
-                                   (vm_offset_t)pMemFreeBSD->Core.pv,
-                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
-                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
-            }
-            else
-            {
-                contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
-                rc = vm_map_remove(kernel_map,
-                                   (vm_offset_t)pMemFreeBSD->Core.pv,
-                                   (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
-                AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
-            }
+        {
+            rc = vm_map_remove(kernel_map,
+                               (vm_offset_t)pMemFreeBSD->Core.pv,
+                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
+            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
+
+            vm_page_lock_queues();
+            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
+            {
+                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
+                vm_page_unwire(pPage, 0);
+                vm_page_free(pPage);
+            }
+            vm_page_unlock_queues();
             break;
+        }
 
         case RTR0MEMOBJTYPE_LOCK:
         {
-            int fFlags = VM_MAP_WIRE_NOHOLES;
             vm_map_t pMap = kernel_map;
 
             if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
-            {
                 pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
-                fFlags |= VM_MAP_WIRE_USER;
-            }
-            else
-                fFlags |= VM_MAP_WIRE_SYSTEM;
 
             rc = vm_map_unwire(pMap,
                                (vm_offset_t)pMemFreeBSD->Core.pv,
                                (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
-                               fFlags);
+                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
             AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
             break;

@@ -162,6 +204,12 @@
         case RTR0MEMOBJTYPE_PHYS_NC:
         {
+            vm_page_lock_queues();
             for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
-                vm_page_free_toq(pMemFreeBSD->u.Phys.apPages[iPage]);
+            {
+                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
+                vm_page_unwire(pPage, 0);
+                vm_page_free(pPage);
+            }
+            vm_page_unlock_queues();
             break;
         }
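Throughout the reworked free paths above, vm_page_free_toq() is replaced by an unwire-then-free sequence under the page-queue lock, since the pages are wired at allocation time. A minimal stand-alone sketch of that release pattern, assuming FreeBSD 7.x-era vm(9) interfaces; the helper name is made up for illustration:

    /* Hypothetical helper illustrating the unwire-then-free pattern used by
     * the new RTR0MEMOBJTYPE_PAGE and RTR0MEMOBJTYPE_PHYS_NC free paths. */
    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <vm/vm.h>
    #include <vm/vm_page.h>

    static void rtDemoFreeWiredPages(vm_page_t *papPages, uint32_t cPages)
    {
        vm_page_lock_queues();              /* the page-queue lock guards unwire/free */
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            vm_page_t pPage = papPages[iPage];
            vm_page_unwire(pPage, 0);       /* drop the wiring; 0 = don't reactivate */
            vm_page_free(pPage);            /* return the page to the free lists */
        }
        vm_page_unlock_queues();
    }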
@@ -184,113 +232,81 @@
 
     /* create the object. */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PAGE, NULL, cb);
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
+                                                                       RTR0MEMOBJTYPE_PAGE, NULL, cb);
     if (!pMemFreeBSD)
         return VERR_NO_MEMORY;
 
-    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cPages);
-    if (pMemFreeBSD->u.NonPhys.pObject)
-    {
-        vm_offset_t MapAddress = vm_map_min(kernel_map);
-        rc = vm_map_find(kernel_map,                        /* map */
-                         pMemFreeBSD->u.NonPhys.pObject,    /* object */
-                         0,                                 /* offset */
-                         &MapAddress,                       /* addr (IN/OUT) */
-                         cb,                                /* length */
-                         TRUE,                              /* find_space */
-                         fExecutable                        /* protection */
-                         ? VM_PROT_ALL
-                         : VM_PROT_RW,
-                         VM_PROT_ALL,                       /* max(_prot) */
-                         FALSE);                            /* cow (copy-on-write) */
-        if (rc == KERN_SUCCESS)
-        {
-            rc = VINF_SUCCESS;
-
-            VM_OBJECT_LOCK(pMemFreeBSD->u.NonPhys.pObject);
-            for (size_t iPage = 0; iPage < cPages; iPage++)
-            {
-                vm_page_t pPage;
-
-                pPage = vm_page_alloc(pMemFreeBSD->u.NonPhys.pObject, iPage,
-                                      VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
-                                      VM_ALLOC_WIRED);
-
-                if (!pPage)
-                {
-                    /*
-                     * Out of pages
-                     * Remove already allocated pages
-                     */
-                    while (iPage-- > 0)
-                    {
-                        vm_map_lock(kernel_map);
-                        pPage = vm_page_lookup(pMemFreeBSD->u.NonPhys.pObject, iPage);
-                        vm_page_lock_queues();
-                        vm_page_unwire(pPage, 0);
-                        vm_page_free(pPage);
-                        vm_page_unlock_queues();
-                    }
-                    rc = VERR_NO_MEMORY;
-                    break;
-                }
-
-                pPage->valid = VM_PAGE_BITS_ALL;
-            }
-            VM_OBJECT_UNLOCK(pMemFreeBSD->u.NonPhys.pObject);
-
-            if (rc == VINF_SUCCESS)
-            {
-                vm_map_entry_t pMapEntry;
-                boolean_t fEntryFound;
-
-                fEntryFound = vm_map_lookup_entry(kernel_map, MapAddress, &pMapEntry);
-                if (fEntryFound)
-                {
-                    pMapEntry->wired_count = 1;
-                    vm_map_simplify_entry(kernel_map, pMapEntry);
-
-                    /* Put the page into the page table now. */
-                    VM_OBJECT_LOCK(pMemFreeBSD->u.NonPhys.pObject);
-                    vm_offset_t AddressDst = MapAddress;
-
-                    for (size_t iPage = 0; iPage < cPages; iPage++)
-                    {
-                        vm_page_t pPage;
-
-                        pPage = vm_page_lookup(pMemFreeBSD->u.NonPhys.pObject, iPage);
-
-#if __FreeBSD_version >= 701105
-                        pmap_enter(kernel_map->pmap, AddressDst, VM_PROT_NONE, pPage,
-                                   fExecutable
-                                   ? VM_PROT_ALL
-                                   : VM_PROT_RW,
-                                   TRUE);
-#else
-                        pmap_enter(kernel_map->pmap, AddressDst, pPage,
-                                   fExecutable
-                                   ? VM_PROT_ALL
-                                   : VM_PROT_RW,
-                                   TRUE);
-#endif
-
-                        AddressDst += PAGE_SIZE;
-                    }
-                    VM_OBJECT_UNLOCK(pMemFreeBSD->u.NonPhys.pObject);
-
-                    /* Store start address */
-                    pMemFreeBSD->Core.pv = (void *)MapAddress;
-                    *ppMem = &pMemFreeBSD->Core;
-                    return VINF_SUCCESS;
-                }
-                else
-                {
-                    AssertFailed();
-                }
-            }
-        }
-        rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
-    }
-    else
-        rc = VERR_NO_MEMORY;
+    pMemFreeBSD->u.Phys.cPages = cPages;
+
+    vm_offset_t MapAddress = vm_map_min(kernel_map);
+    rc = vm_map_find(kernel_map,    /* map */
+                     NULL,          /* object */
+                     0,             /* offset */
+                     &MapAddress,   /* addr (IN/OUT) */
+                     cb,            /* length */
+                     TRUE,          /* find_space */
+                     fExecutable    /* protection */
+                     ? VM_PROT_ALL
+                     : VM_PROT_RW,
+                     VM_PROT_ALL,   /* max(_prot) */
+                     0);            /* cow (copy-on-write) */
+    if (rc == KERN_SUCCESS)
+    {
+        rc = VINF_SUCCESS;
+
+        for (size_t iPage = 0; iPage < cPages; iPage++)
+        {
+            vm_page_t pPage;
+
+            pPage = vm_page_alloc(NULL, iPage,
+                                  VM_ALLOC_SYSTEM |
+                                  VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+
+            if (!pPage)
+            {
+                /*
+                 * Out of pages
+                 * Remove already allocated pages
+                 */
+                while (iPage-- > 0)
+                {
+                    pPage = pMemFreeBSD->u.Phys.apPages[iPage];
+                    vm_page_lock_queues();
+                    vm_page_unwire(pPage, 0);
+                    vm_page_free(pPage);
+                    vm_page_unlock_queues();
+                }
+                rc = VERR_NO_MEMORY;
+                break;
+            }
+
+            pPage->valid = VM_PAGE_BITS_ALL;
+            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
+        }
+
+        if (rc == VINF_SUCCESS)
+        {
+            vm_offset_t AddressDst = MapAddress;
+
+            for (size_t iPage = 0; iPage < cPages; iPage++)
+            {
+                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
+
+                MY_PMAP_ENTER(kernel_map->pmap, AddressDst, pPage,
+                              fExecutable
+                              ? VM_PROT_ALL
+                              : VM_PROT_RW,
+                              TRUE);
+
+                AddressDst += PAGE_SIZE;
+            }
+
+            /* Store start address */
+            pMemFreeBSD->Core.pv = (void *)MapAddress;
+            *ppMem = &pMemFreeBSD->Core;
+            return VINF_SUCCESS;
+        }
+    }
+    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
 
     rtR0MemObjDelete(&pMemFreeBSD->Core);
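The rewritten rtR0MemObjNativeAllocPage() drops the backing VM object entirely: it carves address space out of kernel_map with a NULL object, allocates wired VM_ALLOC_NOOBJ pages, records them in u.Phys.apPages, and enters each page into the kernel pmap through the new MY_PMAP_ENTER macro. A condensed sketch of that allocate-and-map loop, using the changeset's own macro; the function name is hypothetical and the error unwinding is left out:

    /* Hypothetical condensation of the new allocation loop; the real code
     * additionally frees the pages it already got when vm_page_alloc() fails. */
    static int rtDemoBackKernelRange(vm_offset_t MapAddress, size_t cPages, bool fExecutable)
    {
        vm_offset_t AddressDst = MapAddress;
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            vm_page_t pPage = vm_page_alloc(NULL /* no VM object */, iPage,
                                            VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
            if (!pPage)
                return VERR_NO_MEMORY;

            pPage->valid = VM_PAGE_BITS_ALL;        /* contents count as valid */
            MY_PMAP_ENTER(kernel_map->pmap, AddressDst, pPage,
                          fExecutable ? VM_PROT_ALL : VM_PROT_RW,
                          TRUE /* wired */);
            AddressDst += PAGE_SIZE;
        }
        return VINF_SUCCESS;
    }

Sizing the header with RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]) makes the page array a flexible tail of the object, so a single allocation covers both the bookkeeping and the page pointers.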
@@ -352,4 +368,14 @@
 }
 
+static void rtR0MemObjFreeBSDPhysPageInit(vm_page_t pPage, vm_pindex_t iPage)
+{
+    pPage->wire_count = 1;
+    pPage->pindex     = iPage;
+    pPage->act_count  = 0;
+    pPage->oflags     = 0;
+    pPage->flags      = PG_UNMANAGED;
+    atomic_add_int(&cnt.v_wire_count, 1);
+}
+
 static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                            size_t cb,

@@ -380,5 +406,8 @@
         if (pPage)
             for (uint32_t iPage = 0; iPage < cPages; iPage++)
+            {
+                rtR0MemObjFreeBSDPhysPageInit(&pPage[iPage], iPage);
                 pMemFreeBSD->u.Phys.apPages[iPage] = &pPage[iPage];
+            }
         else
             rc = VERR_NO_MEMORY;

@@ -395,7 +424,14 @@
                 /* Free all allocated pages */
                 while (iPage-- > 0)
-                    vm_page_free_toq(pMemFreeBSD->u.Phys.apPages[iPage]);
+                {
+                    pPage = pMemFreeBSD->u.Phys.apPages[iPage];
+                    vm_page_lock_queues();
+                    vm_page_unwire(pPage, 0);
+                    vm_page_free(pPage);
+                    vm_page_unlock_queues();
+                }
                 rc = VERR_NO_MEMORY;
                 break;
             }
+            rtR0MemObjFreeBSDPhysPageInit(pPage, iPage);
             pMemFreeBSD->u.Phys.apPages[iPage] = pPage;

@@ -421,5 +457,5 @@
 int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
 {
-#if 0
+#if 1
     return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
 #else

@@ -452,5 +488,5 @@
 int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
 {
-#if 0
+#if 1
     return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
 #else
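rtR0MemObjFreeBSDPhysPageInit() exists because the physical-page paths appear to obtain their vm_page_t entries straight from the physical allocator rather than from vm_page_alloc(), so the wiring and pageout-exclusion bookkeeping (wire_count, PG_UNMANAGED, and the global v_wire_count counter) must be applied by hand. Illustrative usage over a contiguous run, mirroring the hunk above; pPages is assumed to be the raw vm_page_t array returned by the allocator, which lies outside this hunk:

    /* Illustrative only: initialize and record each page of a contiguous run. */
    for (uint32_t iPage = 0; iPage < cPages; iPage++)
    {
        rtR0MemObjFreeBSDPhysPageInit(&pPages[iPage], iPage);   /* wire + PG_UNMANAGED */
        pMemFreeBSD->u.Phys.apPages[iPage] = &pPages[iPage];    /* for free and phys-addr lookup */
    }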
@@ -475,5 +511,10 @@
 
 
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
+/**
+ * Worker locking the memory in either kernel or user maps.
+ */
+static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
+                                     vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
+                                     RTR0PROCESS R0Process)
 {
     int rc;

@@ -481,5 +522,5 @@
 
     /* create the object. */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)AddrStart, cb);
     if (!pMemFreeBSD)
         return VERR_NO_MEMORY;

@@ -489,8 +530,8 @@
      * resource usage restrictions, so we'll call vm_map_wire directly.
      */
-    rc = vm_map_wire(&((struct proc *)R0Process)->p_vmspace->vm_map, /* the map */
-                     (vm_offset_t)R3Ptr,                             /* start */
-                     (vm_offset_t)R3Ptr + cb,                        /* end */
-                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);        /* flags */
+    rc = vm_map_wire(pVmMap,                                    /* the map */
+                     AddrStart,                                 /* start */
+                     AddrStart + cb,                            /* end */
+                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); /* flags */
     if (rc == KERN_SUCCESS)
     {

@@ -504,26 +545,22 @@
 
 
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
+{
+    return rtR0MemObjNativeLockInMap(ppMem,
+                                     &((struct proc *)R0Process)->p_vmspace->vm_map,
+                                     (vm_offset_t)R3Ptr,
+                                     cb,
+                                     fAccess,
+                                     R0Process);
+}
+
+
 int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
 {
-    int rc;
-    NOREF(fAccess);
-
-    /* create the object. */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    /* lock the memory */
-    rc = vm_map_wire(kernel_map,                                /* the map */
-                     (vm_offset_t)pv,                           /* start */
-                     (vm_offset_t)pv + cb,                      /* end */
-                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); /* flags - SYSTEM? */
-    if (rc == KERN_SUCCESS)
-    {
-        pMemFreeBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
-        *ppMem = &pMemFreeBSD->Core;
-        return VINF_SUCCESS;
-    }
-    rtR0MemObjDelete(&pMemFreeBSD->Core);
-    return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
+    return rtR0MemObjNativeLockInMap(ppMem,
+                                     kernel_map,
+                                     (vm_offset_t)pv,
+                                     cb,
+                                     fAccess,
+                                     NIL_RTR0PROCESS);
 }
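Locking is now centralized in rtR0MemObjNativeLockInMap(); the user and kernel entry points differ only in the vm_map_t they pass. Note that both now wire with VM_MAP_WIRE_SYSTEM, matching the comment kept in the code about sidestepping resource-usage restrictions. A sketch of the equivalent direct call for a user-process range, assuming as the changeset does that RTR0PROCESS really carries a struct proc pointer; this is a fragment, with R3Ptr, cb, and R0Process coming from the enclosing function:

    /* Illustrative fragment: wire cb bytes at R3Ptr in the target process map,
     * which is what the LockUser path now does through the shared worker. */
    vm_map_t pVmMap = &((struct proc *)R0Process)->p_vmspace->vm_map;
    int krc = vm_map_wire(pVmMap,
                          (vm_offset_t)R3Ptr,
                          (vm_offset_t)R3Ptr + cb,
                          VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
    if (krc != KERN_SUCCESS)
        return VERR_NO_MEMORY; /* exact KERN_* -> VERR_* translation is still a @todo */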
*/ … … 705 739 vm_page_t pPage = pMemToMapFreeBSD->u.Phys.apPages[iPage]; 706 740 707 #if __FreeBSD_version >= 701105 708 pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE); 709 #else 710 pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE); 711 #endif 741 MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE); 712 742 AddrR3Dst += PAGE_SIZE; 713 743 } 714 }715 else if (pMemToMapFreeBSD->u.NonPhys.pObject)716 {717 /* Mapping page memory object */718 VM_OBJECT_LOCK(pMemToMapFreeBSD->u.NonPhys.pObject);719 720 /* Insert the memory page by page into the mapping. */721 for (uint32_t iPage = 0; iPage < cPages; iPage++)722 {723 vm_page_t pPage = vm_page_lookup(pMemToMapFreeBSD->u.NonPhys.pObject, iPage);724 725 #if __FreeBSD_version >= 701105726 pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE);727 #else728 pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);729 #endif730 AddrR3Dst += PAGE_SIZE;731 }732 VM_OBJECT_UNLOCK(pMemToMapFreeBSD->u.NonPhys.pObject);733 744 } 734 745 else … … 741 752 vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap)); 742 753 743 #if __FreeBSD_version >= 701105 744 pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE); 745 #else 746 pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE); 747 #endif 754 MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE); 748 755 AddrR3Dst += PAGE_SIZE; 749 756 AddrToMap += PAGE_SIZE; … … 764 771 { 765 772 Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3); 766 pMemFreeBSD->u.NonPhys.pObject = pObjectNew;767 773 pMemFreeBSD->Core.u.Mapping.R0Process = R0Process; 768 774 *ppMem = &pMemFreeBSD->Core; … … 774 780 } 775 781 776 if (RT_FAILURE(rc))777 vm_object_deallocate(pObjectNew);778 779 782 return VERR_NO_MEMORY; 780 783 } … … 783 786 int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt) 784 787 { 785 NOREF(pMem); 786 NOREF(offSub); 787 NOREF(cbSub); 788 NOREF(fProt); 788 vm_prot_t ProtectionFlags = 0; 789 vm_offset_t AddrStart = (uintptr_t)pMem->pv + offSub; 790 vm_offset_t AddrEnd = AddrStart + cbSub; 791 vm_map_t pVmMap = rtR0MemObjFreeBSDGetMap(pMem); 792 793 if (!pVmMap) 794 return VERR_NOT_SUPPORTED; 795 796 if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE) 797 ProtectionFlags = VM_PROT_NONE; 798 if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ) 799 ProtectionFlags |= VM_PROT_READ; 800 if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE) 801 ProtectionFlags |= VM_PROT_WRITE; 802 if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC) 803 ProtectionFlags |= VM_PROT_EXECUTE; 804 805 int krc = vm_map_protect(pVmMap, AddrStart, AddrEnd, ProtectionFlags, FALSE); 806 if (krc == KERN_SUCCESS) 807 return VINF_SUCCESS; 808 789 809 return VERR_NOT_SUPPORTED; 790 810 } … … 815 835 } 816 836 817 case RTR0MEMOBJTYPE_PAGE:818 {819 vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);820 return vtophys(pb);821 }822 823 837 case RTR0MEMOBJTYPE_MAPPING: 824 838 { … … 842 856 return pMemFreeBSD->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT); 843 857 858 case RTR0MEMOBJTYPE_PAGE: 844 859 case RTR0MEMOBJTYPE_PHYS_NC: 845 {846 860 return VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[iPage]); 847 }848 861 849 862 case RTR0MEMOBJTYPE_RES_VIRT: