Changeset 26887 in vbox for trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
- Timestamp: Feb 28, 2010 2:39:31 AM
- File: 1 edited
trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
--- r26886
+++ r26887

     /** The core structure. */
     RTR0MEMOBJINTERNAL Core;
-    /** The VM object associated with the allocation. */
-    vm_object_t pObject;
-    /** the VM object associated with the mapping.
-     * In mapping mem object, this is the shadow object?
-     * In a allocation/enter mem object, this is the shared object we constructed (contig, perhaps alloc). */
-    vm_object_t pMappingObject;
+    /** Type dependent data */
+    union
+    {
+        /** Everything not physical */
+        struct
+        {
+            /** The VM object associated with the allocation. */
+            vm_object_t pObject;
+        } NonPhys;
+        /** Physical contiguous/non-contiguous memory */
+        struct
+        {
+            /** Number of allocated pages */
+            uint32_t cPages;
+            /** Array of allocated pages. */
+            vm_page_t apPages[1];
+        } Phys;
+    } u;
 } RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;

…
         case RTR0MEMOBJTYPE_CONT:
             contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
-            if (pMemFreeBSD->pMappingObject)
+            break;
+
+        case RTR0MEMOBJTYPE_PAGE:
+            if (pMemFreeBSD->u.NonPhys.pObject)
             {
                 rc = vm_map_remove(kernel_map,
…
                 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
             }
-            break;
-
-        case RTR0MEMOBJTYPE_PAGE:
-            if (pMemFreeBSD->pObject)
-            {
+            else
+            {
+                contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
                 rc = vm_map_remove(kernel_map,
                                    (vm_offset_t)pMemFreeBSD->Core.pv,
                                    (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
                 AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
-            }
-            else
-            {
-                contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
-                if (pMemFreeBSD->pMappingObject)
-                {
-                    rc = vm_map_remove(kernel_map,
-                                       (vm_offset_t)pMemFreeBSD->Core.pv,
-                                       (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
-                    AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
-                }
             }
             break;
…
             vm_map_t pMap = kernel_map;

-            /* vm_map_remove will unmap the pages we inserted with pmap_enter */
-            AssertMsg(pMemFreeBSD->pMappingObject != NULL, ("MappingObject is NULL\n"));
             if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                 pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;
…
         }

+        case RTR0MEMOBJTYPE_PHYS:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        {
+            vm_page_lock_queues();
+            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
+            {
+                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
+                pPage->wire_count--;
+                vm_page_free_toq(pPage);
+            }
+            vm_page_unlock_queues();
+            break;
+        }
+
         /* unused: */
         case RTR0MEMOBJTYPE_LOW:
-        case RTR0MEMOBJTYPE_PHYS:
-        case RTR0MEMOBJTYPE_PHYS_NC:
         default:
             AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
…
         return VERR_NO_MEMORY;

-    pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cPages);
-    if (pMemFreeBSD->pObject)
+    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cPages);
+    if (pMemFreeBSD->u.NonPhys.pObject)
     {
         vm_offset_t MapAddress = vm_map_min(kernel_map);
         rc = vm_map_find(kernel_map,            /* map */
-                         pMemFreeBSD->pObject,  /* object */
+                         pMemFreeBSD->u.NonPhys.pObject, /* object */
                          0,                     /* offset */
                          &MapAddress,           /* addr (IN/OUT) */
…
             rc = VINF_SUCCESS;

-            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
+            VM_OBJECT_LOCK(pMemFreeBSD->u.NonPhys.pObject);
             for (size_t iPage = 0; iPage < cPages; iPage++)
             {
…
                 vm_page_t pPage;

-                pPage = vm_page_alloc(pMemFreeBSD->pObject, PageIndex,
+                pPage = vm_page_alloc(pMemFreeBSD->u.NonPhys.pObject, PageIndex,
                                       VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
                                       VM_ALLOC_WIRED);
…
                 AddressDst += PAGE_SIZE;
             }
-            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
+            VM_OBJECT_UNLOCK(pMemFreeBSD->u.NonPhys.pObject);

             if (rc == VINF_SUCCESS)
…
 }

-
-int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
-{
-    /** @todo check if there is a more appropriate API somewhere.. */
-
-    /** @todo alignment */
-    if (uAlignment != PAGE_SIZE)
-        return VERR_NOT_SUPPORTED;
+static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
+                                           size_t cb,
+                                           RTHCPHYS PhysHighest, size_t uAlignment,
+                                           bool fContiguous)
+{
+    int rc = VINF_SUCCESS;
+    uint32_t cPages = cb >> PAGE_SHIFT;
+    vm_paddr_t VmPhysAddrHigh;

     /* create the object. */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]), enmType, NULL, cb);
     if (!pMemFreeBSD)
         return VERR_NO_MEMORY;

-    /* do the allocation. */
-    pMemFreeBSD->Core.pv = contigmalloc(cb,                   /* size */
-                                        M_IPRTMOBJ,           /* type */
-                                        M_NOWAIT | M_ZERO,    /* flags */
-                                        0,                    /* lowest physical address*/
-                                        PhysHighest,          /* highest physical address */
-                                        PAGE_SIZE,            /* alignment. */
-                                        0);                   /* boundrary */
-    if (pMemFreeBSD->Core.pv)
-    {
-        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
+    pMemFreeBSD->u.Phys.cPages = cPages;
+
+    /*
+     * For now allocate contiguous pages
+     * if there is an upper limit or
+     * the alignment is not on a page boundary.
+     */
+    if (PhysHighest != NIL_RTHCPHYS)
+    {
+        VmPhysAddrHigh = PhysHighest;
+        fContiguous = true;
+    }
+    else
+        VmPhysAddrHigh = ~(vm_paddr_t)0;
+
+    if (uAlignment != PAGE_SIZE)
+        fContiguous = true;
+
+    mtx_lock(&vm_page_queue_free_mtx);
+    if (fContiguous)
+    {
+        vm_page_t pPage = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);
+
+        if (pPage)
+            for (uint32_t iPage = 0; iPage < cPages; iPage++)
+            {
+                pPage[iPage].flags &= ~PG_FREE;
+                pPage[iPage].wire_count = 1;
+                atomic_add_int(&cnt.v_wire_count, 1);
+                pMemFreeBSD->u.Phys.apPages[iPage] = &pPage[iPage];
+            }
+        else
+            rc = VERR_NO_MEMORY;
+    }
+    else
+    {
+        /* Allocate page by page */
+        for (uint32_t iPage = 0; iPage < cPages; iPage++)
+        {
+            vm_page_t pPage = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 0);
+
+            if (!pPage)
+            {
+                vm_page_lock_queues();
+                while (iPage-- > 0)
+                {
+                    pMemFreeBSD->u.Phys.apPages[iPage]->wire_count--;
+                    vm_page_free_toq(pMemFreeBSD->u.Phys.apPages[iPage]);
+                }
+                vm_page_unlock_queues();
+
+                rc = VERR_NO_MEMORY;
+                break;
+            }
+
+            pPage->flags &= ~PG_FREE;
+            pPage->valid = VM_PAGE_BITS_ALL;
+            pPage->wire_count = 1;
+            atomic_add_int(&cnt.v_wire_count, 1);
+            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
+        }
+    }
+    mtx_unlock(&vm_page_queue_free_mtx);
+
+    if (RT_FAILURE(rc))
+        rtR0MemObjDelete(&pMemFreeBSD->Core);
+    else
+    {
+        if (enmType == RTR0MEMOBJTYPE_PHYS)
+        {
+            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[0]);
+            pMemFreeBSD->Core.u.Phys.fAllocated = true;
+        }
+
         *ppMem = &pMemFreeBSD->Core;
-        return VINF_SUCCESS;
-    }
-
-    rtR0MemObjDelete(&pMemFreeBSD->Core);
-    return VERR_NO_MEMORY;
+    }
+
+    return rc;
+}
+
+int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
+{
+    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
 }

…
 int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
 {
-    /** @todo rtR0MemObjNativeAllocPhys / freebsd */
-    return VERR_NOT_SUPPORTED;
+    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
 }

…
      * Allocate an empty VM object and map it into the requested map.
      */
-    pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
-    if (pMemFreeBSD->pObject)
+    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
+    if (pMemFreeBSD->u.NonPhys.pObject)
     {
         vm_offset_t MapAddress = pvFixed != (void *)-1
…

         rc = vm_map_find(pMap,                  /* map */
-                         pMemFreeBSD->pObject,  /* object */
+                         pMemFreeBSD->u.NonPhys.pObject, /* object */
                          0,                     /* offset */
                          &MapAddress,           /* addr (IN/OUT) */
…
             return VINF_SUCCESS;
         }
-        vm_object_deallocate(pMemFreeBSD->pObject);
+        vm_object_deallocate(pMemFreeBSD->u.NonPhys.pObject);
         rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
     }
…
         return VERR_NOT_SUPPORTED;

-    int rc;
-    vm_object_t pObjectToMap = ((PRTR0MEMOBJFREEBSD)pMemToMap)->pObject;
-    struct proc *pProc       = (struct proc *)R0Process;
-    struct vm_map *pProcMap  = &pProc->p_vmspace->vm_map;
+    int rc;
+    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
+    struct proc *pProc = (struct proc *)R0Process;
+    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;

     /* calc protection */
…
     /* Insert the object in the map. */
     rc = vm_map_find(pProcMap,              /* Map to insert the object in */
-                     pObjectNew,            /* Object to map */
-                     0,                     /* Start offset in the object */
-                     &AddrR3,               /* Start address IN/OUT */
-                     pMemToMap->cb,         /* Size of the mapping */
-                     TRUE,                  /* Whether a suitable address should be searched for first */
-                     ProtectionFlags,       /* protection flags */
-                     VM_PROT_ALL,           /* Maximum protection flags */
-                     0);                    /* Copy on write */
+                     pObjectNew,                  /* Object to map */
+                     0,                           /* Start offset in the object */
+                     &AddrR3,                     /* Start address IN/OUT */
+                     pMemToMap->cb,               /* Size of the mapping */
+                     TRUE,                        /* Whether a suitable address should be searched for first */
+                     ProtectionFlags,             /* protection flags */
+                     VM_PROT_ALL,                 /* Maximum protection flags */
+                     0);                          /* Copy on write */

     /* Map the memory page by page into the destination map. */
     if (rc == KERN_SUCCESS)
     {
-        size_t cLeft = pMemToMap->cb >> PAGE_SHIFT;
+        size_t cPages;
         vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;
         pmap_t pPhysicalMap = pProcMap->pmap;
         vm_offset_t AddrR3Dst = AddrR3;

+        if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
+            || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC)
+            cPages = pMemToMapFreeBSD->u.Phys.cPages;
+        else
+            cPages = pMemToMap->cb >> PAGE_SHIFT;
+
         /* Insert the memory page by page into the mapping. */
-        while (cLeft-- > 0)
-        {
-            vm_page_t Page = PHYS_TO_VM_PAGE(vtophys(AddrToMap));
+        for (uint32_t iPage = 0; iPage < cPages; iPage++)
+        {
+            vm_page_t pPage;
+
+            if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
+                || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC)
+                pPage = pMemToMapFreeBSD->u.Phys.apPages[iPage];
+            else
+            {
+                pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap));
+                AddrToMap += PAGE_SIZE;
+            }

 #if __FreeBSD_version >= 701105
-            pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, Page, ProtectionFlags, TRUE);
+            pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE);
 #else
-            pmap_enter(pPhysicalMap, AddrR3Dst, Page, ProtectionFlags, TRUE);
+            pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
 #endif
-            AddrToMap += PAGE_SIZE;
             AddrR3Dst += PAGE_SIZE;
         }
-        pObjectToMap = pObjectNew;
     }
     else
…
         Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
         pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
-        pMemFreeBSD->pMappingObject = pObjectToMap;
         *ppMem = &pMemFreeBSD->Core;
         return VINF_SUCCESS;
…
         AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
     }
-
-    if (pObjectToMap)
-        vm_object_deallocate(pObjectToMap);

     return VERR_NO_MEMORY;
…

         case RTR0MEMOBJTYPE_PHYS_NC:
+        {
+            RTHCPHYS PhysAddr = NIL_RTHCPHYS;
+
+            if (iPage < pMemFreeBSD->u.Phys.cPages)
+                PhysAddr = VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[iPage]);
+
+            return PhysAddr;
+        }
         case RTR0MEMOBJTYPE_RES_VIRT:
         case RTR0MEMOBJTYPE_LOW:
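The reworked allocator sizes the memory object with RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]) so the per-page vm_page_t array is stored inline at the end of the object, and a single rtR0MemObjDelete() later releases everything. Below is a minimal standalone sketch of that sizing idiom, not VirtualBox code: the MEMOBJSKETCH type, the sketchAlloc() helper and the 4K page-size assumption are illustrative stand-ins for the real IPRT/FreeBSD definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct vm_page *vm_page_t;      /* stand-in for FreeBSD's page descriptor pointer */

typedef struct MEMOBJSKETCH
{
    size_t      cb;                     /* size of the backing memory in bytes */
    uint32_t    cPages;                 /* number of entries in apPages */
    vm_page_t   apPages[1];             /* header is over-allocated so this grows to cPages entries */
} MEMOBJSKETCH;

static MEMOBJSKETCH *sketchAlloc(size_t cb)
{
    uint32_t cPages  = (uint32_t)(cb >> 12);   /* assuming 4K pages (PAGE_SHIFT == 12) */
    /* Equivalent of RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]):
       header bytes up to apPages plus one page pointer per page. */
    size_t   cbAlloc = offsetof(MEMOBJSKETCH, apPages) + cPages * sizeof(vm_page_t);

    MEMOBJSKETCH *pObj = (MEMOBJSKETCH *)calloc(1, cbAlloc);
    if (pObj)
    {
        pObj->cb     = cb;
        pObj->cPages = cPages;          /* apPages[0..cPages-1] is now valid storage */
    }
    return pObj;
}

Because header and page array live in one allocation, the error paths in the changeset only need rtR0MemObjDelete(&pMemFreeBSD->Core) after the individual pages have been freed.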