Changeset 19673 in vbox for trunk/src/VBox/Runtime
- Timestamp: May 13, 2009, 6:40:01 PM
- File: 1 edited
trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
--- trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c (r19591)
+++ trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c (r19673)
 {
     int rc;
+    size_t cPages = cb >> PAGE_SHIFT;

     /* create the object. */
…
         return VERR_NO_MEMORY;

-    /*
-     * We've two options here both expressed nicely by how kld allocates
-     * memory for the module bits:
-     * http://fxr.watson.org/fxr/source/kern/link_elf.c?v=RELENG62#L701
-     */
-#if 1
-    pMemFreeBSD->Core.pv = contigmalloc(cb,                    /* size */
-                                        M_IPRTMOBJ,             /* type */
-                                        M_NOWAIT | M_ZERO,      /* flags */
-                                        0,                      /* lowest physical address*/
-                                        _4G-1,                  /* highest physical address */
-                                        PAGE_SIZE,              /* alignment. */
-                                        0);                     /* boundrary */
-    if (pMemFreeBSD->Core.pv)
-    {
-        *ppMem = &pMemFreeBSD->Core;
-        return VINF_SUCCESS;
-    }
-    rc = VERR_NO_MEMORY;
-    NOREF(fExecutable);
-
-#else
-    pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
+    pMemFreeBSD->pObject = vm_object_allocate(OBJT_DEFAULT, cPages);
     if (pMemFreeBSD->pObject)
     {
…
         if (rc == KERN_SUCCESS)
         {
-            rc = vm_map_wire(kernel_map,         /* map */
-                             MapAddress,         /* start */
-                             MapAddress + cb,    /* end */
-                             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
-            if (rc == KERN_SUCCESS)
+            vm_offset_t AddressDst = MapAddress;
+
+            rc = VINF_SUCCESS;
+
+            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
+            for (size_t iPage = 0; iPage < cPages; iPage++)
+            {
+                vm_pindex_t PageIndex = OFF_TO_IDX(AddressDst);
+                vm_page_t   pPage;
+
+                pPage = vm_page_alloc(pMemFreeBSD->pObject, PageIndex,
+                                      VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
+                                      VM_ALLOC_WIRED);
+                if (pPage)
+                {
+                    vm_page_lock_queues();
+                    vm_page_wire(pPage);
+                    vm_page_unlock_queues();
+                    /* Put the page into the page table now. */
+#if __FreeBSD_version >= 701105
+                    pmap_enter(kernel_map->pmap, AddressDst, VM_PROT_NONE, pPage,
+                               fExecutable
+                               ? VM_PROT_ALL
+                               : VM_PROT_RW,
+                               TRUE);
+#else
+                    pmap_enter(kernel_map->pmap, AddressDst, pPage,
+                               fExecutable
+                               ? VM_PROT_ALL
+                               : VM_PROT_RW,
+                               TRUE);
+#endif
+                }
+                else
+                {
+                    /*
+                     * Allocation failed. vm_map_remove will remove any
+                     * page already alocated.
+                     */
+                    rc = VERR_NO_MEMORY;
+                    break;
+                }
+                AddressDst += PAGE_SIZE;
+            }
+            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
+
+            if (rc == VINF_SUCCESS)
             {
                 pMemFreeBSD->Core.pv = (void *)MapAddress;
…
             }

             vm_map_remove(kernel_map,
                           MapAddress,
                           MapAddress + cb);
         }
         rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
…
     else
         rc = VERR_NO_MEMORY;
-#endif

     rtR0MemObjDelete(&pMemFreeBSD->Core);
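Both hunks of this changeset guard pmap_enter() with the same __FreeBSD_version >= 701105 check, because newer kernels take an additional vm_prot_t access argument (passed as VM_PROT_NONE here) before the page. A minimal sketch of how that check could be folded into one place follows; the helper name rtR0MemObjFreeBSDPmapEnter is hypothetical and not part of the changeset, and the sketch assumes the VM headers memobj-r0drv-freebsd.c already includes.

    /*
     * Illustrative sketch only (not part of the changeset): wraps the
     * version-dependent pmap_enter() call used in both hunks.  Assumes the
     * includes already present in memobj-r0drv-freebsd.c.
     */
    static void rtR0MemObjFreeBSDPmapEnter(pmap_t pPmap, vm_offset_t Addr,
                                           vm_page_t pPage, vm_prot_t fProt)
    {
    #if __FreeBSD_version >= 701105
        /* Newer kernels take an extra access-type argument before the page. */
        pmap_enter(pPmap, Addr, VM_PROT_NONE, pPage, fProt, TRUE);
    #else
        pmap_enter(pPmap, Addr, pPage, fProt, TRUE);
    #endif
    }

With such a helper, the allocation loop above would reduce to a single call of the form rtR0MemObjFreeBSDPmapEnter(kernel_map->pmap, AddressDst, pPage, fExecutable ? VM_PROT_ALL : VM_PROT_RW), and the ring-3 mapping hunk below could reuse it unchanged.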
…
     PROC_UNLOCK(pProc);

-    /*
-     * Mapping into R3 is easy if the mem object has a associated VM object.
-     * If there is not such an object we have to get it from the address.
-     */
-    if (!pObjectToMap)
-    {
-        vm_object_t pObjectNew = vm_object_allocate(OBJT_PHYS, pMemToMap->cb >> PAGE_SHIFT);
-        if (!RT_UNLIKELY(pObjectNew))
-            return VERR_NO_MEMORY;
-
-        /* Insert the object in the map. */
-        rc = vm_map_find(pProcMap,            /* Map to insert the object in */
-                         pObjectNew ,         /* Object to map */
-                         0,                   /* Start offset in the object */
-                         &AddrR3,             /* Start address IN/OUT */
-                         pMemToMap->cb,       /* Size of the mapping */
-                         TRUE,                /* Whether a suitable address should be searched for first */
-                         ProtectionFlags,     /* protection flags */
-                         VM_PROT_ALL,         /* Maximum protection flags */
-                         0);                  /* Copy on write */
-        if (rc == KERN_SUCCESS)
-        {
-            size_t cLeft = pMemToMap->cb >> PAGE_SHIFT;
-            vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;
-            pmap_t pPhysicalMap = pProcMap->pmap;
-            vm_offset_t AddrR3Dst = AddrR3;
-
-            /* Insert the memory page by page into the mapping. */
-            while (cLeft-- > 0)
-            {
-                vm_page_t Page = PHYS_TO_VM_PAGE(vtophys(AddrToMap));
-
-#if __FreeBSD_version >= 701105
-                pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, Page, ProtectionFlags, TRUE);
-#else
-                pmap_enter(pPhysicalMap, AddrR3Dst, Page, ProtectionFlags, TRUE);
-#endif
-                AddrToMap += PAGE_SIZE;
-                AddrR3Dst += PAGE_SIZE;
-            }
-            pObjectToMap = pObjectNew;
-        }
-        else
-            vm_object_deallocate(pObjectNew);
-    }
-    else
-    {
-        /*
-         * Reference the object. If this isn't done the object will removed from kernel space
-         * if the mapping is destroyed.
-         */
-        vm_object_reference(pObjectToMap);
-
-        rc = vm_map_find(pProcMap,            /* Map to insert the object in */
-                         pObjectToMap,        /* Object to map */
-                         0,                   /* Start offset in the object */
-                         &AddrR3,             /* Start address IN/OUT */
-                         pMemToMap->cb,       /* Size of the mapping */
-                         TRUE,                /* Whether a suitable address should be searched for first */
-                         ProtectionFlags,     /* protection flags */
-                         VM_PROT_ALL,         /* Maximum protection flags */
-                         0);                  /* Copy on write */
-    }
+    vm_object_t pObjectNew = vm_object_allocate(OBJT_PHYS, pMemToMap->cb >> PAGE_SHIFT);
+    if (!RT_UNLIKELY(pObjectNew))
+        return VERR_NO_MEMORY;
+
+    /* Insert the object in the map. */
+    rc = vm_map_find(pProcMap,            /* Map to insert the object in */
+                     pObjectNew ,         /* Object to map */
+                     0,                   /* Start offset in the object */
+                     &AddrR3,             /* Start address IN/OUT */
+                     pMemToMap->cb,       /* Size of the mapping */
+                     TRUE,                /* Whether a suitable address should be searched for first */
+                     ProtectionFlags,     /* protection flags */
+                     VM_PROT_ALL,         /* Maximum protection flags */
+                     0);                  /* Copy on write */
+
+    /* Map the memory page by page into the destination map. */
+    if (rc == KERN_SUCCESS)
+    {
+        size_t cLeft = pMemToMap->cb >> PAGE_SHIFT;
+        vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;
+        pmap_t pPhysicalMap = pProcMap->pmap;
+        vm_offset_t AddrR3Dst = AddrR3;
+
+        /* Insert the memory page by page into the mapping. */
+        while (cLeft-- > 0)
+        {
+            vm_page_t Page = PHYS_TO_VM_PAGE(vtophys(AddrToMap));
+
+#if __FreeBSD_version >= 701105
+            pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, Page, ProtectionFlags, TRUE);
+#else
+            pmap_enter(pPhysicalMap, AddrR3Dst, Page, ProtectionFlags, TRUE);
+#endif
+            AddrToMap += PAGE_SIZE;
+            AddrR3Dst += PAGE_SIZE;
+        }
+        pObjectToMap = pObjectNew;
+    }
+    else
+        vm_object_deallocate(pObjectNew);

     if (rc == KERN_SUCCESS)
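For reference, the ring-3 hunk above follows the same page-by-page idea as the kernel allocation hunk: reserve the destination range with vm_map_find(), then walk the source region one PAGE_SIZE step at a time, resolve each kernel virtual address to its vm_page_t via vtophys() and PHYS_TO_VM_PAGE(), and enter it into the target pmap. A condensed sketch under those assumptions is shown below; the function name sketchMapPageByPage is made up for illustration and it reuses the hypothetical helper from the previous sketch.

    /*
     * Sketch only: enter cb bytes of kernel memory starting at AddrSrc into
     * the pmap pDstPmap at AddrDst, one page at a time.  Assumes the
     * destination range was already reserved (e.g. with vm_map_find()).
     */
    static void sketchMapPageByPage(pmap_t pDstPmap, vm_offset_t AddrDst,
                                    vm_offset_t AddrSrc, size_t cb, vm_prot_t fProt)
    {
        for (size_t off = 0; off < cb; off += PAGE_SIZE)
        {
            /* vtophys() yields the physical address backing the kernel VA;
               PHYS_TO_VM_PAGE() maps that to the corresponding vm_page_t. */
            vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrSrc + off));
            rtR0MemObjFreeBSDPmapEnter(pDstPmap, AddrDst + off, pPage, fProt);
        }
    }

Entering the pages eagerly avoids later faults on the new mapping; on failure the changeset simply drops the freshly allocated object with vm_object_deallocate(), as the hunk shows.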