Timestamp:
    Feb 28, 2010 9:44:02 PM
File (1 edited):
trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c

--- trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c (r26887)
+++ trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c (r26899)
@@ -58,5 +58,5 @@
     union
     {
-        /** Everything not physical*/
+        /** Non physical memory allocations */
         struct
         {
@@ -64,10 +64,10 @@
             vm_object_t pObject;
         } NonPhys;
-        /** Physical contiguous/non-contiguous memory*/
+        /** Physical memory allocations */
         struct
         {
-            /** Number of allocated pages */
+            /** Number of pages */
             uint32_t cPages;
-            /** Array of allocated pages.*/
+            /** Array of pages - variable */
             vm_page_t apPages[1];
         } Phys;
@@ -162,12 +162,6 @@
         case RTR0MEMOBJTYPE_PHYS_NC:
         {
-            vm_page_lock_queues();
             for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
-            {
-                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
-                pPage->wire_count--;
-                vm_page_free_toq(pPage);
-            }
-            vm_page_unlock_queues();
+                vm_page_free_toq(pMemFreeBSD->u.Phys.apPages[iPage]);
             break;
         }
@@ -211,6 +205,4 @@
     if (rc == KERN_SUCCESS)
     {
-        vm_offset_t AddressDst = MapAddress;
-
         rc = VINF_SUCCESS;
 
@@ -218,60 +210,82 @@
         for (size_t iPage = 0; iPage < cPages; iPage++)
         {
-            vm_pindex_t PageIndex = OFF_TO_IDX(AddressDst);
             vm_page_t pPage;
 
-            pPage = vm_page_alloc(pMemFreeBSD->u.NonPhys.pObject, PageIndex,
+            pPage = vm_page_alloc(pMemFreeBSD->u.NonPhys.pObject, iPage,
                                   VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
                                   VM_ALLOC_WIRED);
 
-#if __FreeBSD_version >= 800000 /** @todo Find exact version number */
-            /* Fixes crashes during VM termination on FreeBSD8-CURRENT amd64
-             * with kernel debugging enabled. */
-            vm_page_set_valid(pPage, 0, PAGE_SIZE);
+            if (!pPage)
+            {
+                /*
+                 * Out of pages
+                 * Remove already allocated pages
+                 */
+                while (iPage-- > 0)
+                {
+                    vm_map_lock(kernel_map);
+                    pPage = vm_page_lookup(pMemFreeBSD->u.NonPhys.pObject, iPage);
+                    vm_page_lock_queues();
+                    vm_page_unwire(pPage, 0);
+                    vm_page_free(pPage);
+                    vm_page_unlock_queues();
+                }
+                rc = VERR_NO_MEMORY;
+                break;
+            }
+
+            pPage->valid = VM_PAGE_BITS_ALL;
+        }
+        VM_OBJECT_UNLOCK(pMemFreeBSD->u.NonPhys.pObject);
+
+        if (rc == VINF_SUCCESS)
+        {
+            vm_map_entry_t pMapEntry;
+            boolean_t fEntryFound;
+
+            fEntryFound = vm_map_lookup_entry(kernel_map, MapAddress, &pMapEntry);
+            if (fEntryFound)
+            {
+                pMapEntry->wired_count = 1;
+                vm_map_simplify_entry(kernel_map, pMapEntry);
+
+                /* Put the page into the page table now. */
+                VM_OBJECT_LOCK(pMemFreeBSD->u.NonPhys.pObject);
+                vm_offset_t AddressDst = MapAddress;
+
+                for (size_t iPage = 0; iPage < cPages; iPage++)
+                {
+                    vm_page_t pPage;
+
+                    pPage = vm_page_lookup(pMemFreeBSD->u.NonPhys.pObject, iPage);
+
+#if __FreeBSD_version >= 701105
+                    pmap_enter(kernel_map->pmap, AddressDst, VM_PROT_NONE, pPage,
+                               fExecutable
+                               ? VM_PROT_ALL
+                               : VM_PROT_RW,
+                               TRUE);
+#else
+                    pmap_enter(kernel_map->pmap, AddressDst, pPage,
+                               fExecutable
+                               ? VM_PROT_ALL
+                               : VM_PROT_RW,
+                               TRUE);
 #endif
 
-            if (pPage)
-            {
-                vm_page_lock_queues();
-                vm_page_wire(pPage);
-                vm_page_unlock_queues();
-                /* Put the page into the page table now. */
-#if __FreeBSD_version >= 701105
-                pmap_enter(kernel_map->pmap, AddressDst, VM_PROT_NONE, pPage,
-                           fExecutable
-                           ? VM_PROT_ALL
-                           : VM_PROT_RW,
-                           TRUE);
-#else
-                pmap_enter(kernel_map->pmap, AddressDst, pPage,
-                           fExecutable
-                           ? VM_PROT_ALL
-                           : VM_PROT_RW,
-                           TRUE);
-#endif
+                    AddressDst += PAGE_SIZE;
+                }
+                VM_OBJECT_UNLOCK(pMemFreeBSD->u.NonPhys.pObject);
+
+                /* Store start address */
+                pMemFreeBSD->Core.pv = (void *)MapAddress;
+                *ppMem = &pMemFreeBSD->Core;
+                return VINF_SUCCESS;
             }
             else
             {
-                /*
-                 * Allocation failed. vm_map_remove will remove any
-                 * page already alocated.
-                 */
-                rc = VERR_NO_MEMORY;
-                break;
+                AssertFailed();
             }
-            AddressDst += PAGE_SIZE;
-        }
-        VM_OBJECT_UNLOCK(pMemFreeBSD->u.NonPhys.pObject);
-
-        if (rc == VINF_SUCCESS)
-        {
-            pMemFreeBSD->Core.pv = (void *)MapAddress;
-            *ppMem = &pMemFreeBSD->Core;
-            return VINF_SUCCESS;
-        }
-
-        vm_map_remove(kernel_map,
-                      MapAddress,
-                      MapAddress + cb);
+        }
     }
     rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
@@ -348,5 +362,6 @@
 
     /* create the object. */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]), enmType, NULL, cb);
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
+                                                                       enmType, NULL, cb);
     if (!pMemFreeBSD)
         return VERR_NO_MEMORY;
@@ -354,21 +369,9 @@
     pMemFreeBSD->u.Phys.cPages = cPages;
 
-    /*
-     * For now allocate contiguous pages
-     * if there is an upper limit or
-     * the alignment is not on a page boundary.
-     */
     if (PhysHighest != NIL_RTHCPHYS)
-    {
         VmPhysAddrHigh = PhysHighest;
-        fContiguous = true;
-    }
     else
         VmPhysAddrHigh = ~(vm_paddr_t)0;
 
-    if (uAlignment != PAGE_SIZE)
-        fContiguous = true;
-
-    mtx_lock(&vm_page_queue_free_mtx);
     if (fContiguous)
     {
@@ -377,10 +380,5 @@
         if (pPage)
             for (uint32_t iPage = 0; iPage < cPages; iPage++)
-            {
-                pPage[iPage].flags &= ~PG_FREE;
-                pPage[iPage].wire_count = 1;
-                atomic_add_int(&cnt.v_wire_count, 1);
                 pMemFreeBSD->u.Phys.apPages[iPage] = &pPage[iPage];
-            }
         else
             rc = VERR_NO_MEMORY;
@@ -391,28 +389,17 @@
         for (uint32_t iPage = 0; iPage < cPages; iPage++)
         {
-            vm_page_t pPage = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 0);
+            vm_page_t pPage = vm_phys_alloc_contig(1, 0, VmPhysAddrHigh, uAlignment, 0);
 
             if (!pPage)
             {
-                vm_page_lock_queues();
+                /* Free all allocated pages */
                 while (iPage-- > 0)
-                {
-                    pMemFreeBSD->u.Phys.apPages[iPage]->wire_count--;
                     vm_page_free_toq(pMemFreeBSD->u.Phys.apPages[iPage]);
-                }
-                vm_page_unlock_queues();
-
                 rc = VERR_NO_MEMORY;
                 break;
             }
-
-            pPage->flags &= ~PG_FREE;
-            pPage->valid = VM_PAGE_BITS_ALL;
-            pPage->wire_count = 1;
-            atomic_add_int(&cnt.v_wire_count, 1);
             pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
         }
     }
-    mtx_unlock(&vm_page_queue_free_mtx);
 
     if (RT_FAILURE(rc))
@@ -434,4 +421,29 @@
 int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
 {
+#if 0
     return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
+#else
+    /* create the object. */
+    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
+    if (!pMemFreeBSD)
+        return VERR_NO_MEMORY;
+
+    /* do the allocation. */
+    pMemFreeBSD->Core.pv = contigmalloc(cb,                   /* size */
+                                        M_IPRTMOBJ,           /* type */
+                                        M_NOWAIT | M_ZERO,    /* flags */
+                                        0,                    /* lowest physical address*/
+                                        _4G-1,                /* highest physical address */
+                                        uAlignment,           /* alignment. */
+                                        0);                   /* boundrary */
+    if (pMemFreeBSD->Core.pv)
+    {
+        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
+        *ppMem = &pMemFreeBSD->Core;
+        return VINF_SUCCESS;
+    }
+
+    rtR0MemObjDelete(&pMemFreeBSD->Core);
+    return VERR_NO_MEMORY;
+#endif
 }
@@ -440,4 +452,8 @@
 int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
 {
+#if 0
     return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
+#else
+    return VERR_NOT_SUPPORTED;
+#endif
 }
@@ -617,72 +633,7 @@
     return VERR_NOT_SUPPORTED;
 
-
-
     /* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */
-
-#if 0
     /** @todo finish the implementation. */
 
-    int rc;
-    void *pvR0 = NULL;
-    PRTR0MEMOBJFREEBSD pMemToMapOs2 = (PRTR0MEMOBJFREEBSD)pMemToMap;
-    switch (pMemToMapOs2->Core.enmType)
-    {
-        /*
-         * These has kernel mappings.
-         */
-        case RTR0MEMOBJTYPE_PAGE:
-        case RTR0MEMOBJTYPE_LOW:
-        case RTR0MEMOBJTYPE_CONT:
-            pvR0 = pMemToMapOs2->Core.pv;
-            break;
-
-        case RTR0MEMOBJTYPE_PHYS_NC:
-        case RTR0MEMOBJTYPE_PHYS:
-            pvR0 = pMemToMapOs2->Core.pv;
-            if (!pvR0)
-            {
-                /* no ring-0 mapping, so allocate a mapping in the process. */
-                AssertMsgReturn(uAlignment == PAGE_SIZE, ("%#zx\n", uAlignment), VERR_NOT_SUPPORTED);
-                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
-                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
-                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
-                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
-                if (rc)
-                    return RTErrConvertFromOS2(rc);
-                pMemToMapOs2->Core.pv = pvR0;
-            }
-            break;
-
-        case RTR0MEMOBJTYPE_LOCK:
-            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
-                return VERR_NOT_SUPPORTED; /** @todo implement this... */
-            pvR0 = pMemToMapOs2->Core.pv;
-            break;
-
-        case RTR0MEMOBJTYPE_RES_VIRT:
-        case RTR0MEMOBJTYPE_MAPPING:
-        default:
-            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
-            return VERR_INTERNAL_ERROR;
-    }
-
-    /*
-     * Create a dummy mapping object for it.
-     *
-     * All mappings are read/write/execute in OS/2 and there isn't
-     * any cache options, so sharing is ok. And the main memory object
-     * isn't actually freed until all the mappings have been freed up
-     * (reference counting).
-     */
-    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR0, pMemToMapOs2->Core.cb);
-    if (pMemFreeBSD)
-    {
-        pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
-        *ppMem = &pMemFreeBSD->Core;
-        return VINF_SUCCESS;
-    }
-    return VERR_NO_MEMORY;
-#endif
     return VERR_NOT_IMPLEMENTED;
 }
@@ -721,5 +672,5 @@
     PROC_UNLOCK(pProc);
 
-    vm_object_t pObjectNew = vm_object_allocate(OBJT_PHYS, pMemToMap->cb >> PAGE_SHIFT);
+    vm_object_t pObjectNew = vm_object_allocate(OBJT_DEFAULT, pMemToMap->cb >> PAGE_SHIFT);
     if (!RT_UNLIKELY(pObjectNew))
         return VERR_NO_MEMORY;
@@ -739,41 +690,67 @@
     if (rc == KERN_SUCCESS)
     {
-        size_t cPages;
-        vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;
+        size_t cPages = pMemToMap->cb >> PAGE_SHIFT;;
         pmap_t pPhysicalMap = pProcMap->pmap;
         vm_offset_t AddrR3Dst = AddrR3;
 
         if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
             || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC)
-            cPages = pMemToMapFreeBSD->u.Phys.cPages;
-        else
-            cPages = pMemToMap->cb >> PAGE_SHIFT;
-
-        /* Insert the memory page by page into the mapping. */
-        for (uint32_t iPage = 0; iPage < cPages; iPage++)
-        {
-            vm_page_t pPage;
-
-            if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
-                || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC)
-                pPage = pMemToMapFreeBSD->u.Phys.apPages[iPage];
-            else
-            {
-                pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap));
+        {
+            /* Mapping physical allocations */
+            Assert(cPages == pMemToMap->u.Phys.cPages);
+
+            /* Insert the memory page by page into the mapping. */
+            for (uint32_t iPage = 0; iPage < cPages; iPage++)
+            {
+                vm_page_t pPage = pMemToMapFreeBSD->u.Phys.apPages[iPage];
+
+#if __FreeBSD_version >= 701105
+                pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE);
+#else
+                pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
+#endif
+                AddrR3Dst += PAGE_SIZE;
+            }
+        }
+        else if (pMemToMapFreeBSD->u.NonPhys.pObject)
+        {
+            /* Mapping page memory object */
+            VM_OBJECT_LOCK(pMemToMapFreeBSD->u.NonPhys.pObject);
+
+            /* Insert the memory page by page into the mapping. */
+            for (uint32_t iPage = 0; iPage < cPages; iPage++)
+            {
+                vm_page_t pPage = vm_page_lookup(pMemToMapFreeBSD->u.NonPhys.pObject, iPage);
+
+#if __FreeBSD_version >= 701105
+                pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE);
+#else
+                pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
+#endif
+                AddrR3Dst += PAGE_SIZE;
+            }
+            VM_OBJECT_UNLOCK(pMemToMapFreeBSD->u.NonPhys.pObject);
+        }
+        else
+        {
+            /* Mapping cont or low memory types */
+            vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;
+
+            for (uint32_t iPage = 0; iPage < cPages; iPage++)
+            {
+                vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap));
+
+#if __FreeBSD_version >= 701105
+                pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE);
+#else
+                pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
+#endif
+                AddrR3Dst += PAGE_SIZE;
                 AddrToMap += PAGE_SIZE;
             }
-
-#if __FreeBSD_version >= 701105
-            pmap_enter(pPhysicalMap, AddrR3Dst, VM_PROT_NONE, pPage, ProtectionFlags, TRUE);
-#else
-            pmap_enter(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
-#endif
-            AddrR3Dst += PAGE_SIZE;
-        }
-    }
-    else
-        vm_object_deallocate(pObjectNew);
-
-    if (rc == KERN_SUCCESS)
+        }
+    }
+
+    if (RT_SUCCESS(rc))
     {
        /*
@@ -787,4 +764,5 @@
         {
             Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
+            pMemFreeBSD->u.NonPhys.pObject = pObjectNew;
             pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
             *ppMem = &pMemFreeBSD->Core;
@@ -795,4 +773,7 @@
         AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
     }
+
+    if (RT_FAILURE(rc))
+        vm_object_deallocate(pObjectNew);
 
     return VERR_NO_MEMORY;
@@ -863,11 +844,7 @@
         case RTR0MEMOBJTYPE_PHYS_NC:
         {
-            RTHCPHYS PhysAddr = NIL_RTHCPHYS;
-
-            if (iPage < pMemFreeBSD->u.Phys.cPages)
-                PhysAddr = VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[iPage]);
-
-            return PhysAddr;
-        }
+            return VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[iPage]);
+        }
+
        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_LOW:
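
The replacement rtR0MemObjNativeAllocPhys() body above (new lines 426-448) sidesteps the hand-rolled physical page allocator and leans on the kernel's contigmalloc(9)/contigfree(9) pair instead. Below is a minimal standalone sketch of that allocation pattern, assuming a hypothetical malloc type M_EXAMPLE in place of IPRT's M_IPRTMOBJ (which is set up elsewhere in the real file) and an explicit 4GB ceiling mirroring the _4G-1 bound in the diff:

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>
    #include <vm/vm.h>
    #include <vm/pmap.h>

    MALLOC_DEFINE(M_EXAMPLE, "example", "physically contiguous example buffers");

    /* Allocate cb bytes of zeroed, physically contiguous memory below 4GB. */
    static void *ExampleAllocContig(size_t cb, size_t uAlignment, vm_paddr_t *pPhys)
    {
        void *pv = contigmalloc(cb, M_EXAMPLE, M_NOWAIT | M_ZERO,
                                0,                      /* low:  no lower bound */
                                (vm_paddr_t)0xFFFFFFFF, /* high: stay below 4GB, like _4G-1 above */
                                uAlignment,             /* physical alignment of the start address */
                                0);                     /* boundary: no crossing restriction */
        if (pv)
            *pPhys = vtophys(pv); /* one lookup suffices: the backing pages are contiguous */
        return pv;
    }

    /* contigfree() must be handed back the original size and malloc type. */
    static void ExampleFreeContig(void *pv, size_t cb)
    {
        contigfree(pv, cb, M_EXAMPLE);
    }

Because contigmalloc() returns a wired kernel mapping whose backing pages are physically contiguous, a single vtophys() on the start address describes the whole range, which is why the new code can file the allocation as RTR0MEMOBJTYPE_CONT and store only u.Cont.Phys.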
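Both the ring-0 and ring-3 mapping paths in this change repeat the same __FreeBSD_version >= 701105 conditional around pmap_enter(), because that KPI revision inserted a vm_prot_t access-type argument ahead of the page pointer. A hedged sketch of collapsing the call sites behind one wrapper follows; RT_PMAP_ENTER is a hypothetical name, and the two signatures are copied from the call sites above:

    #include <sys/param.h>
    #include <vm/vm.h>
    #include <vm/pmap.h>

    /*
     * Newer pmap_enter() takes an access-type argument before the page;
     * VM_PROT_NONE matches what every call site in this change passes for it.
     */
    #if __FreeBSD_version >= 701105
    # define RT_PMAP_ENTER(pPmap, Addr, pPage, fProt, fWired) \
          pmap_enter((pPmap), (Addr), VM_PROT_NONE, (pPage), (fProt), (fWired))
    #else
    # define RT_PMAP_ENTER(pPmap, Addr, pPage, fProt, fWired) \
          pmap_enter((pPmap), (Addr), (pPage), (fProt), (fWired))
    #endif

With such a wrapper, each per-page loop reduces to a single RT_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE) call per iteration.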