Changeset 92246 in vbox for trunk/src/VBox/Runtime/r0drv
- Timestamp: Nov 6, 2021 3:10:49 AM
- svn:sync-xref-src-repo-rev: 148086
- Location: trunk/src/VBox/Runtime/r0drv
- Files: 8 edited
trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp
--- r91483
+++ r92246
@@ -744 +744 @@
         if (pMemDarwin)
         {
+            if (fOptions & kIOMemoryKernelUserShared)
+                pMemDarwin->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
+            else
+                pMemDarwin->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
             if (fContiguous)
             {
trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
--- r91483
+++ r92246
@@ -230 +230 @@
     {
         VM_OBJECT_WLOCK(pObject);
-        pPages = vm_page_alloc_contig(pObject, iPIndex, fFlags, cPages, 0,
-                                      VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
+        pPages = vm_page_alloc_contig(pObject, iPIndex, fFlags, cPages, 0, VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
         VM_OBJECT_WUNLOCK(pObject);
         if (pPages)
@@ -280 +279 @@
     if (fContiguous)
     {
-        if (rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, 0, cPages, VmPhysAddrHigh,
-                                                   uAlignment, fWire) != NULL)
+        if (rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, 0, cPages, VmPhysAddrHigh, uAlignment, fWire) != NULL)
             return VINF_SUCCESS;
         return rcNoMem;
@@ -288 +286 @@
     for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
     {
-        vm_page_t pPage = rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, iPage, 1, VmPhysAddrHigh,
-                                                                 uAlignment, fWire);
-        if (!pPage)
+        vm_page_t pPage = rtR0MemObjFreeBSDContigPhysAllocHelper(pObject, iPage, 1, VmPhysAddrHigh, uAlignment, fWire);
+        if (pPage)
+        { /* likely */ }
+        else
         {
             /* Free all allocated pages */
@@ -336 +335 @@
     if (rc == KERN_SUCCESS)
     {
-        rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages,
-                                              VmPhysAddrHigh, PAGE_SIZE, fContiguous,
-                                              false, rcNoMem);
+        rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh, PAGE_SIZE,
+                                              fContiguous, false /*fWire*/, rcNoMem);
         if (RT_SUCCESS(rc))
         {
-            vm_map_wire(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb,
-                        VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+            vm_map_wire(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb, VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

             /* Store start address */
-            pMemFreeBSD->Core.pv = (void *)MapAddress;
+            pMemFreeBSD->Core.pv      = (void *)MapAddress;
+            pMemFreeBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
             return VINF_SUCCESS;
         }
@@ -366 +364 @@
     PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PAGE,
                                                                        NULL, cb, pszTag);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, ~(vm_paddr_t)0, false, VERR_NO_MEMORY);
-    if (RT_FAILURE(rc))
-    {
-        rtR0MemObjDelete(&pMemFreeBSD->Core);
+    if (pMemFreeBSD)
+    {
+        int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, ~(vm_paddr_t)0, false /*fContiguous*/, VERR_NO_MEMORY);
+        if (RT_SUCCESS(rc))
+            *ppMem = &pMemFreeBSD->Core;
+        else
+            rtR0MemObjDelete(&pMemFreeBSD->Core);
         return rc;
     }
-
-    *ppMem = &pMemFreeBSD->Core;
-    return rc;
+    return VERR_NO_MEMORY;
 }
@@ -391 +387 @@
 {
     PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOW, NULL, cb, pszTag);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, false, VERR_NO_LOW_MEMORY);
-    if (RT_FAILURE(rc))
-    {
-        rtR0MemObjDelete(&pMemFreeBSD->Core);
+    if (pMemFreeBSD)
+    {
+        int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, false /*fContiguous*/, VERR_NO_LOW_MEMORY);
+        if (RT_SUCCESS(rc))
+            *ppMem = &pMemFreeBSD->Core;
+        else
+            rtR0MemObjDelete(&pMemFreeBSD->Core);
         return rc;
     }
-
-    *ppMem = &pMemFreeBSD->Core;
-    return rc;
+    return VERR_NO_MEMORY;
 }
@@ -410 +404 @@
     PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT,
                                                                        NULL, cb, pszTag);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, true, VERR_NO_CONT_MEMORY);
-    if (RT_FAILURE(rc))
-    {
-        rtR0MemObjDelete(&pMemFreeBSD->Core);
+    if (pMemFreeBSD)
+    {
+        int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, true /*fContiguous*/, VERR_NO_CONT_MEMORY);
+        if (RT_SUCCESS(rc))
+        {
+            pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
+            *ppMem = &pMemFreeBSD->Core;
+        }
+        else
+            rtR0MemObjDelete(&pMemFreeBSD->Core);
         return rc;
     }
-
-    pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
-    *ppMem = &pMemFreeBSD->Core;
-    return rc;
+    return VERR_NO_MEMORY;
 }
@@ -429 +423 @@
                                              size_t uAlignment, bool fContiguous, int rcNoMem, const char *pszTag)
 {
-    uint32_t cPages = atop(cb);
-    vm_paddr_t VmPhysAddrHigh;
-
     /* create the object. */
     PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), enmType, NULL, cb, pszTag);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, atop(cb));
-
-    if (PhysHighest != NIL_RTHCPHYS)
-        VmPhysAddrHigh = PhysHighest;
-    else
-        VmPhysAddrHigh = ~(vm_paddr_t)0;
-
-    int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
-                                              uAlignment, fContiguous, true, rcNoMem);
-    if (RT_SUCCESS(rc))
-    {
-        if (fContiguous)
-        {
-            Assert(enmType == RTR0MEMOBJTYPE_PHYS);
-            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
-            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
-            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
-            pMemFreeBSD->Core.u.Phys.fAllocated = true;
-        }
-
-        *ppMem = &pMemFreeBSD->Core;
-    }
-    else
-    {
-        vm_object_deallocate(pMemFreeBSD->pObject);
-        rtR0MemObjDelete(&pMemFreeBSD->Core);
-    }
-
-    return rc;
+    if (pMemFreeBSD)
+    {
+        vm_paddr_t const VmPhysAddrHigh = PhysHighest != NIL_RTHCPHYS ? PhysHighest : ~(vm_paddr_t)0;
+        u_long const cPages = atop(cb);
+
+        pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, cPages);
+
+        int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
+                                                  uAlignment, fContiguous, true, rcNoMem);
+        if (RT_SUCCESS(rc))
+        {
+            if (fContiguous)
+            {
+                Assert(enmType == RTR0MEMOBJTYPE_PHYS);
+                VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
+                pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
+                VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
+                pMemFreeBSD->Core.u.Phys.fAllocated = true;
+            }
+
+            pMemFreeBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
+            *ppMem = &pMemFreeBSD->Core;
+        }
+        else
+        {
+            vm_object_deallocate(pMemFreeBSD->pObject);
+            rtR0MemObjDelete(&pMemFreeBSD->Core);
+        }
+        return rc;
+    }
+    return VERR_NO_MEMORY;
 }
@@ -491 +481 @@
     PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS,
                                                                        NULL, cb, pszTag);
-    if (!pMemFreeBSD)
-        return VERR_NO_MEMORY;
-
-    /* there is no allocation here, it needs to be mapped somewhere first. */
-    pMemFreeBSD->Core.u.Phys.fAllocated = false;
-    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
-    pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
-    *ppMem = &pMemFreeBSD->Core;
-    return VINF_SUCCESS;
+    if (pMemFreeBSD)
+    {
+        /* there is no allocation here, it needs to be mapped somewhere first. */
+        pMemFreeBSD->Core.u.Phys.fAllocated = false;
+        pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
+        pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
+        *ppMem = &pMemFreeBSD->Core;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
 }
trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
--- r91483
+++ r92246
@@ -353 +353 @@
     if (!pMemLnx)
        return VERR_NO_MEMORY;
+    pMemLnx->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
     pMemLnx->cPages = cPages;
trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp
--- r91483
+++ r92246
@@ -313 +313 @@

 /**
+ * Checks whether the allocation was zero initialized or not.
+ *
+ * This only works on allocations. It is not meaningful for mappings, reserved
+ * memory and entered physical address, and will return false for these.
+ *
+ * @returns true if the allocation was initialized to zero at allocation time,
+ *          false if not or query not meaningful to the object type.
+ * @param   hMemObj         The ring-0 memory object to be freed.
+ *
+ * @remarks It can be expected that memory allocated in the same fashion will
+ *          have the same initialization state. So, if this returns true for
+ *          one allocation it will return true for all other similarly made
+ *          allocations.
+ */
+RTR0DECL(bool) RTR0MemObjWasZeroInitialized(PRTR0MEMOBJ hMemObj)
+{
+    PRTR0MEMOBJINTERNAL pMem;
+
+    /* Validate the object handle. */
+    if (RT_UNLIKELY(hMemObj == NIL_RTR0MEMOBJ))
+        return false;
+    AssertPtrReturn(hMemObj, false);
+    pMem = (PRTR0MEMOBJINTERNAL)hMemObj;
+    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
+    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);
+    Assert(   (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
+           != (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC));
+
+    /* return the alloc init state. */
+    return (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
+        == RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
+
+}
+RT_EXPORT_SYMBOL(RTR0MemObjWasZeroInitialized);
+
+
+/**
  * Frees a ring-0 memory object.
  *
  * @returns IPRT status code.
  * @retval  VERR_INVALID_HANDLE if
- * @param   MemObj          The ring-0 memory object to be freed. NULL is accepted.
+ * @param   MemObj          The ring-0 memory object to be freed. NIL is
+ *                          accepted.
  * @param   fFreeMappings   Whether or not to free mappings of the object.
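Usage note (not part of the changeset): a minimal caller-side sketch of how the new RTR0MemObjWasZeroInitialized() query could be used to skip redundant zeroing after an allocation. The rtR0ExampleAllocZeroedPage helper and its parameters are hypothetical; only the RTR0MemObj*, RT_BZERO and RT_SUCCESS calls are existing IPRT APIs, and the snippet assumes iprt/memobj.h, iprt/string.h and iprt/errcore.h are included.

    /* Hypothetical illustration only -- not part of r92246. */
    static int rtR0ExampleAllocZeroedPage(PRTR0MEMOBJ phMemObj, size_t cb)
    {
        /* Allocate page-aligned, non-executable ring-0 memory. */
        int rc = RTR0MemObjAllocPage(phMemObj, cb, false /*fExecutable*/);
        if (RT_SUCCESS(rc))
        {
            /* Only clear the buffer when the backend did not hand out pre-zeroed pages. */
            if (!RTR0MemObjWasZeroInitialized(*phMemObj))
                RT_BZERO(RTR0MemObjAddress(*phMemObj), RTR0MemObjSize(*phMemObj));
        }
        return rc;
    }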
trunk/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c
--- r91483
+++ r92246
@@ -189 +189 @@

 static int rtR0MemObjNetBSDAllocHelper(PRTR0MEMOBJNETBSD pMemNetBSD, size_t cb, bool fExecutable,
                                        paddr_t VmPhysAddrHigh, bool fContiguous)
 {
     /* Virtual space first */
-    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0,
-                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
     if (virt == 0)
         return VERR_NO_MEMORY;
@@ -202 +201 @@

     /* Physical pages */
-    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh,
-                        PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
+    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh, PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
     {
         uvm_km_free(kernel_map, virt, cb, UVM_KMF_VAONLY);
-        return VERR_NO_MEMORY;
+        return VERR_NO_MEMORY; /** @todo inaccurate status code */
     }

@@ -221 +219 @@
     }

+    pMemNetBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /*?*/
     pMemNetBSD->Core.pv = (void *)virt;
     if (fContiguous)
@@ -234 +233 @@
 {
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PAGE, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    void *pvMem = kmem_alloc(cb, KM_SLEEP);
-    if (RT_UNLIKELY(!pvMem))
-    {
+    if (pMemNetBSD)
+    {
+        void *pvMem = kmem_alloc(cb, KM_SLEEP);
+        if (pvMem)
+        {
+            if (fExecutable)
+                pmap_protect(pmap_kernel(), (vaddr_t)pvMem, (vaddr_t)pvMem + cb, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
+
+            pMemNetBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
+            pMemNetBSD->Core.pv = pvMem;
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
         return VERR_NO_PAGE_MEMORY;
     }
-    if (fExecutable)
-    {
-        pmap_protect(pmap_kernel(), (vaddr_t)pvMem, ((vaddr_t)pvMem) + cb,
-                     VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
-    }
-
-    pMemNetBSD->Core.pv = pvMem;
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
@@ -265 +263 @@
 {
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOW, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false);
-    if (rc)
-    {
+    if (pMemNetBSD)
+    {
+        int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false /*fContiguous*/);
+        if (RT_SUCCESS(rc))
+        {
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
         return rc;
     }
-
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
@@ -283 +281 @@
 {
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_CONT, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true);
-    if (rc)
-    {
+    if (pMemNetBSD)
+    {
+        int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true /*fContiguous*/);
+        if (RT_SUCCESS(rc))
+        {
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
         return rc;
     }
-
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
@@ -301 +299 @@
                                            RTHCPHYS PhysHighest, size_t uAlignment, bool fContiguous, const char *pszTag)
 {
-    paddr_t VmPhysAddrHigh;
-
     /* create the object. */
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), enmType, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    if (PhysHighest != NIL_RTHCPHYS)
-        VmPhysAddrHigh = PhysHighest;
-    else
-        VmPhysAddrHigh = ~(paddr_t)0;
-
-    int nsegs = fContiguous ? 1 : INT_MAX;
-
-    int error = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
-    if (error)
-    {
+    if (pMemNetBSD)
+    {
+        paddr_t const VmPhysAddrHigh = PhysHighest != NIL_RTHCPHYS ? PhysHighest : ~(paddr_t)0;
+        int const nsegs = fContiguous ? 1 : INT_MAX;
+        int rc = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
+        if (!rc)
+        {
+            pMemNetBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /*?*/
+            if (fContiguous)
+            {
+                Assert(enmType == RTR0MEMOBJTYPE_PHYS);
+                const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
+                pMemNetBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pg);
+                pMemNetBSD->Core.u.Phys.fAllocated = true;
+            }
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
-        return VERR_NO_MEMORY;
-    }
-
-    if (fContiguous)
-    {
-        Assert(enmType == RTR0MEMOBJTYPE_PHYS);
-        const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
-        pMemNetBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pg);
-        pMemNetBSD->Core.u.Phys.fAllocated = true;
-    }
-    *ppMem = &pMemNetBSD->Core;
-
-    return VINF_SUCCESS;
+        return VERR_NO_PAGE_MEMORY;
+    }
+    return VERR_NO_MEMORY;
 }
@@ -355 +346 @@
     /* create the object. */
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    /* there is no allocation here, it needs to be mapped somewhere first. */
-    pMemNetBSD->Core.u.Phys.fAllocated = false;
-    pMemNetBSD->Core.u.Phys.PhysBase = Phys;
-    pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
-    TAILQ_INIT(&pMemNetBSD->pglist);
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    if (pMemNetBSD)
+    {
+        /* there is no allocation here, it needs to be mapped somewhere first. */
+        pMemNetBSD->Core.u.Phys.fAllocated = false;
+        pMemNetBSD->Core.u.Phys.PhysBase = Phys;
+        pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
+        TAILQ_INIT(&pMemNetBSD->pglist);
+        *ppMem = &pMemNetBSD->Core;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
 }
@@ -373 +365 @@
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK,
                                                                     (void *)R3Ptr, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = uvm_map_pageable(
-        &((struct proc *)R0Process)->p_vmspace->vm_map,
-        R3Ptr,
-        R3Ptr + cb,
-        0, 0);
-    if (rc)
-    {
+    if (pMemNetBSD)
+    {
+        int rc = uvm_map_pageable(&((struct proc *)R0Process)->p_vmspace->vm_map, R3Ptr, R3Ptr + cb,
+                                  0 /*new_pageable*/, 0 /*lockflags*/);
+        if (!rc)
+        {
+            pMemNetBSD->Core.u.Lock.R0Process = R0Process;
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
-        return VERR_NO_MEMORY;
-    }
-
-    pMemNetBSD->Core.u.Lock.R0Process = R0Process;
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+        return VERR_LOCK_FAILED;
+    }
+    return VERR_NO_MEMORY;
 }
@@ -397 +386 @@
     /* Kernel memory (always?) wired; all memory allocated by vbox code is? */
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
-    pMemNetBSD->Core.pv = pv;
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    if (pMemNetBSD)
+    {
+        pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
+        pMemNetBSD->Core.pv = pv;
+        *ppMem = &pMemNetBSD->Core;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
 }
@@ -418 +408 @@
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_RES_VIRT,
                                                                     NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment,
-                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
-    if (virt == 0)
-    {
+    if (pMemNetBSD)
+    {
+        vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment, UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+        if (virt != 0)
+        {
+            pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
+            pMemNetBSD->Core.pv = (void *)virt;
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
         return VERR_NO_MEMORY;
     }
-
-    pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
-    pMemNetBSD->Core.pv = (void *)virt;
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
@@ -466 +455 @@
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_MAPPING, NULL, sz, pszTag);

-    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment,
-                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
-    if (virt == 0)
-    {
-        rtR0MemObjDelete(&pMemNetBSD->Core);
-        return VERR_NO_MEMORY;
-    }
-
-    vm_prot_t prot = 0;
-
-    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
-        prot |= VM_PROT_READ;
-    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
-        prot |= VM_PROT_WRITE;
-    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
-        prot |= VM_PROT_EXECUTE;
-
-    struct vm_page *page;
-    vaddr_t virt2 = virt;
-    size_t map_pos = 0;
-    TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
-    {
-        if (map_pos >= offSub)
-        {
-            if (cbSub > 0 && (map_pos >= offSub + cbSub))
-                break;
-
-            pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
-            virt2 += PAGE_SIZE;
-        }
-        map_pos += PAGE_SIZE;
-    }
-
-    pMemNetBSD->Core.pv = (void *)virt;
-    pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
-    *ppMem = &pMemNetBSD->Core;
-
-    return VINF_SUCCESS;
+    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment, UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    if (virt != 0)
+    {
+        vm_prot_t prot = 0;
+        if (fProt & RTMEM_PROT_READ)
+            prot |= VM_PROT_READ;
+        if (fProt & RTMEM_PROT_WRITE)
+            prot |= VM_PROT_WRITE;
+        if (fProt & RTMEM_PROT_EXEC)
+            prot |= VM_PROT_EXECUTE;
+
+        struct vm_page *page;
+        vaddr_t virt2 = virt;
+        size_t map_pos = 0;
+        TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
+        {
+            if (map_pos >= offSub)
+            {
+                if (cbSub > 0 && (map_pos >= offSub + cbSub))
+                    break;
+
+                pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
+                virt2 += PAGE_SIZE;
+            }
+            map_pos += PAGE_SIZE;
+        }
+
+        pMemNetBSD->Core.pv = (void *)virt;
+        pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+        *ppMem = &pMemNetBSD->Core;
+        return VINF_SUCCESS;
+    }
+
+    rtR0MemObjDelete(&pMemNetBSD->Core);
+    return VERR_NO_MEMORY;
 }
@@ -518 +504 @@
 DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
 {
-    vm_prot_t ProtectionFlags = 0;
-    vaddr_t AddrStart = (vaddr_t)pMem->pv + offSub;
-    vm_map_t pVmMap = rtR0MemObjNetBSDGetMap(pMem);
-
-    if (!pVmMap)
-        return VERR_NOT_SUPPORTED;
-
-    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
-        ProtectionFlags |= UVM_PROT_R;
-    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
-        ProtectionFlags |= UVM_PROT_W;
-    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
-        ProtectionFlags |= UVM_PROT_X;
-
-    int error = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub,
-                                ProtectionFlags, 0);
-    if (!error)
-        return VINF_SUCCESS;
-
+    vm_map_t const pVmMap = rtR0MemObjNetBSDGetMap(pMem);
+    if (pVmMap)
+    {
+        vaddr_t const AddrStart = (vaddr_t)pMem->pv + offSub;
+        vm_prot_t ProtectionFlags = 0;
+        if (fProt & RTMEM_PROT_READ)
+            ProtectionFlags |= UVM_PROT_R;
+        if (fProt & RTMEM_PROT_WRITE)
+            ProtectionFlags |= UVM_PROT_W;
+        if (fProt & RTMEM_PROT_EXEC)
+            ProtectionFlags |= UVM_PROT_X;
+
+        int rc = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub, ProtectionFlags, 0);
+        if (!rc)
+            return VINF_SUCCESS;
+        return RTErrConvertFromErrno(rc);
+    }
     return VERR_NOT_SUPPORTED;
 }
trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp
--- r91483
+++ r92246
@@ -283 +283 @@
         if (pMemNt)
         {
+            pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
             pMemNt->fAllocatedPagesForMdl = true;
             pMemNt->cMdls = 1;
@@ -333 +334 @@
         if (pMemNt)
         {
+            pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
             pMemNt->cMdls = 1;
             pMemNt->apMdls[0] = pMdl;
@@ -441 +443 @@
         if (pMemNt)
         {
+            pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
             pMemNt->fAllocatedPagesForMdl = true;
             pMemNt->cMdls = 1;
@@ -525 +528 @@
         if (pMemNt)
         {
+            pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
             pMemNt->fAllocatedPagesForMdl = true;
             pMemNt->cMdls = 1;
@@ -607 +611 @@
         if (pMemNt)
         {
+            pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
             pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
             pMemNt->cMdls = 1;
@@ -668 +673 @@
         if (pMemNt)
         {
+            pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
             pMemNt->Core.u.Phys.fAllocated = true;
             pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
@@ -705 +711 @@
         if (pMemNt)
         {
+            pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
             pMemNt->fAllocatedPagesForMdl = true;
             pMemNt->cMdls = 1;
trunk/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp
--- r91483
+++ r92246
@@ -144 +144 @@
     PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
                                                            RTR0MEMOBJTYPE_PAGE, NULL, cb, pszTag);
-    if (!pMemOs2)
-        return VERR_NO_MEMORY;
-
-    /* do the allocation. */
-    int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
-    if (!rc)
-    {
-        ULONG cPagesRet = cPages;
-        rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
+    if (pMemOs2)
+    {
+        /* do the allocation. */
+        int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
         if (!rc)
         {
-            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
-            *ppMem = &pMemOs2->Core;
-            return VINF_SUCCESS;
+            ULONG cPagesRet = cPages;
+            rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
+            if (!rc)
+            {
+                rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
+                pMemOs2->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /* doesn't seem to be possible to zero anything */
+                *ppMem = &pMemOs2->Core;
+                return VINF_SUCCESS;
+            }
+            KernVMFree(pMemOs2->Core.pv);
         }
-        KernVMFree(pMemOs2->Core.pv);
-    }
-    rtR0MemObjDelete(&pMemOs2->Core);
-    return RTErrConvertFromOS2(rc);
+        rtR0MemObjDelete(&pMemOs2->Core);
+        return RTErrConvertFromOS2(rc);
+    }
+    return VERR_NO_MEMORY;
 }
@@ -181 +183 @@
     PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
                                                            RTR0MEMOBJTYPE_LOW, NULL, cb, pszTag);
-    if (!pMemOs2)
-        return VERR_NO_MEMORY;
-
-    /* do the allocation. */
-    int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
-    if (!rc)
-    {
-        ULONG cPagesRet = cPages;
-        rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
+    if (pMemOs2)
+    {
+        /* do the allocation. */
+        int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
         if (!rc)
         {
-            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
-            *ppMem = &pMemOs2->Core;
-            return VINF_SUCCESS;
+            ULONG cPagesRet = cPages;
+            rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
+            if (!rc)
+            {
+                rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
+                pMemOs2->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /* doesn't seem to be possible to zero anything */
+                *ppMem = &pMemOs2->Core;
+                return VINF_SUCCESS;
+            }
+            KernVMFree(pMemOs2->Core.pv);
         }
-        KernVMFree(pMemOs2->Core.pv);
-    }
-    rtR0MemObjDelete(&pMemOs2->Core);
-    rc = RTErrConvertFromOS2(rc);
-    return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
+        rtR0MemObjDelete(&pMemOs2->Core);
+        rc = RTErrConvertFromOS2(rc);
+        return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
+    }
+    return VERR_NO_MEMORY;
 }
@@ -206 +210 @@
 DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
 {
     NOREF(fExecutable);

     /* create the object. */
     PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT,
                                                            NULL, cb, pszTag);
-    if (!pMemOs2)
-        return VERR_NO_MEMORY;
-
-    /* do the allocation. */
-    ULONG ulPhys = ~0UL;
-    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
-    if (!rc)
-    {
-        Assert(ulPhys != ~0UL);
-        pMemOs2->Core.u.Cont.Phys = ulPhys;
-        *ppMem = &pMemOs2->Core;
-        return VINF_SUCCESS;
-    }
-    rtR0MemObjDelete(&pMemOs2->Core);
-    return RTErrConvertFromOS2(rc);
+    if (pMemOs2)
+    {
+        /* do the allocation. */
+        ULONG ulPhys = ~0UL;
+        int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
+        if (!rc)
+        {
+            Assert(ulPhys != ~0UL);
+            pMemOs2->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /* doesn't seem to be possible to zero anything */
+            pMemOs2->Core.u.Cont.Phys = ulPhys;
+            *ppMem = &pMemOs2->Core;
+            return VINF_SUCCESS;
+        }
+        rtR0MemObjDelete(&pMemOs2->Core);
+        return RTErrConvertFromOS2(rc);
+    }
+    return VERR_NO_MEMORY;
 }
@@ -241 +247 @@
     PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS,
                                                            NULL, cb, pszTag);
-    if (!pMemOs2)
-        return VERR_NO_MEMORY;
-
-    /* do the allocation. */
-    ULONG ulPhys = ~0UL;
-    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
-    if (!rc)
-    {
-        Assert(ulPhys != ~0UL);
-        pMemOs2->Core.u.Phys.fAllocated = true;
-        pMemOs2->Core.u.Phys.PhysBase = ulPhys;
-        *ppMem = &pMemOs2->Core;
-        return VINF_SUCCESS;
-    }
-    rtR0MemObjDelete(&pMemOs2->Core);
-    return RTErrConvertFromOS2(rc);
+    if (pMemOs2)
+    {
+        /* do the allocation. */
+        ULONG ulPhys = ~0UL;
+        int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0),
+                             &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
+        if (!rc)
+        {
+            Assert(ulPhys != ~0UL);
+            pMemOs2->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /* doesn't seem to be possible to zero anything */
+            pMemOs2->Core.u.Phys.fAllocated = true;
+            pMemOs2->Core.u.Phys.PhysBase = ulPhys;
+            *ppMem = &pMemOs2->Core;
+            return VINF_SUCCESS;
+        }
+        rtR0MemObjDelete(&pMemOs2->Core);
+        return RTErrConvertFromOS2(rc);
+    }
+    return VERR_NO_MEMORY;
 }
@@ -275 +284 @@
     PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS,
                                                            NULL, cb, pszTag);
-    if (!pMemOs2)
-        return VERR_NO_MEMORY;
-
-    /* there is no allocation here, right? it needs to be mapped somewhere first. */
-    pMemOs2->Core.u.Phys.fAllocated = false;
-    pMemOs2->Core.u.Phys.PhysBase = Phys;
-    pMemOs2->Core.u.Phys.uCachePolicy = uCachePolicy;
-    *ppMem = &pMemOs2->Core;
-    return VINF_SUCCESS;
+    if (pMemOs2)
+    {
+        /* there is no allocation here, right? it needs to be mapped somewhere first. */
+        pMemOs2->Core.u.Phys.fAllocated = false;
+        pMemOs2->Core.u.Phys.PhysBase = Phys;
+        pMemOs2->Core.u.Phys.uCachePolicy = uCachePolicy;
+        *ppMem = &pMemOs2->Core;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
 }
@@ -296 +306 @@
     PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
                                                            RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb, pszTag);
-    if (!pMemOs2)
-        return VERR_NO_MEMORY;
-
-    /* lock it. */
-    ULONG cPagesRet = cPages;
-    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
-                        (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
-    if (!rc)
-    {
-        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
-        Assert(cb == pMemOs2->Core.cb);
-        Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
-        pMemOs2->Core.u.Lock.R0Process = R0Process;
-        *ppMem = &pMemOs2->Core;
-        return VINF_SUCCESS;
-    }
-    rtR0MemObjDelete(&pMemOs2->Core);
-    return RTErrConvertFromOS2(rc);
+    if (pMemOs2)
+    {
+        /* lock it. */
+        ULONG cPagesRet = cPages;
+        int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
+                            (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
+        if (!rc)
+        {
+            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
+            Assert(cb == pMemOs2->Core.cb);
+            Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
+            pMemOs2->Core.u.Lock.R0Process = R0Process;
+            *ppMem = &pMemOs2->Core;
+            return VINF_SUCCESS;
+        }
+        rtR0MemObjDelete(&pMemOs2->Core);
+        return RTErrConvertFromOS2(rc);
+    }
+    return VERR_NO_MEMORY;
 }
@@ -323 +334 @@
     PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
                                                            RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
-    if (!pMemOs2)
-        return VERR_NO_MEMORY;
-
-    /* lock it. */
-    ULONG cPagesRet = cPages;
-    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
-                        pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
-    if (!rc)
-    {
-        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
-        pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
-        *ppMem = &pMemOs2->Core;
-        return VINF_SUCCESS;
-    }
-    rtR0MemObjDelete(&pMemOs2->Core);
-    return RTErrConvertFromOS2(rc);
+    if (pMemOs2)
+    {
+        /* lock it. */
+        ULONG cPagesRet = cPages;
+        int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
+                            pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
+        if (!rc)
+        {
+            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
+            pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
+            *ppMem = &pMemOs2->Core;
+            return VINF_SUCCESS;
+        }
+        rtR0MemObjDelete(&pMemOs2->Core);
+        return RTErrConvertFromOS2(rc);
+    }
+    return VERR_NO_MEMORY;
 }
trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
--- r91483
+++ r92246
@@ -654 +654 @@
     /* Create the object. */
     PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb, pszTag);
-    if (RT_UNLIKELY(!pMemSolaris))
-        return VERR_NO_MEMORY;
-
-    void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
-    if (RT_UNLIKELY(!pvMem))
-    {
+    if (pMemSolaris)
+    {
+        void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
+        if (pvMem)
+        {
+            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
+            pMemSolaris->Core.pv = pvMem;
+            pMemSolaris->pvHandle = NULL;
+            *ppMem = &pMemSolaris->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemSolaris->Core);
         return VERR_NO_PAGE_MEMORY;
     }
-
-    pMemSolaris->Core.pv = pvMem;
-    pMemSolaris->pvHandle = NULL;
-    *ppMem = &pMemSolaris->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
@@ -684 +685 @@
     /* Create the object */
     PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb, pszTag);
-    if (!pMemSolaris)
-        return VERR_NO_MEMORY;
-
-    /* Allocate physically low page-aligned memory. */
-    uint64_t uPhysHi = _4G - 1;
-    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
-    if (RT_UNLIKELY(!pvMem))
-    {
+    if (pMemSolaris)
+    {
+        /* Allocate physically low page-aligned memory. */
+        uint64_t uPhysHi = _4G - 1;
+        void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
+        if (pvMem)
+        {
+            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
+            pMemSolaris->Core.pv = pvMem;
+            pMemSolaris->pvHandle = NULL;
+            *ppMem = &pMemSolaris->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemSolaris->Core);
         return VERR_NO_LOW_MEMORY;
     }
-    pMemSolaris->Core.pv = pvMem;
-    pMemSolaris->pvHandle = NULL;
-    *ppMem = &pMemSolaris->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
@@ -713 +716 @@
 #if HC_ARCH_BITS == 64
     PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
-    if (RT_UNLIKELY(!pMemSolaris))
-        return VERR_NO_MEMORY;
-
-    if (PhysHighest == NIL_RTHCPHYS)
-    {
-        uint64_t PhysAddr = UINT64_MAX;
-        void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
-        if (!pvPages)
-        {
-            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
-            rtR0MemObjDelete(&pMemSolaris->Core);
-            return VERR_NO_MEMORY;
-        }
-        Assert(PhysAddr != UINT64_MAX);
-        Assert(!(PhysAddr & PAGE_OFFSET_MASK));
-
-        pMemSolaris->Core.pv = NULL;
-        pMemSolaris->pvHandle = pvPages;
-        pMemSolaris->fIndivPages = true;
+    if (pMemSolaris)
+    {
+        if (PhysHighest == NIL_RTHCPHYS)
+        {
+            uint64_t PhysAddr = UINT64_MAX;
+            void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
+            if (!pvPages)
+            {
+                LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
+                rtR0MemObjDelete(&pMemSolaris->Core);
+                return VERR_NO_MEMORY;
+            }
+            Assert(PhysAddr != UINT64_MAX);
+            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
+
+            pMemSolaris->Core.pv = NULL;
+            pMemSolaris->pvHandle = pvPages;
+            pMemSolaris->fIndivPages = true;
+        }
+        else
+        {
+            /*
+             * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
+             * We fall back to using contig_alloc().
+             */
+            uint64_t PhysAddr = UINT64_MAX;
+            void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
+            if (!pvMem)
+            {
+                LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
+                rtR0MemObjDelete(&pMemSolaris->Core);
+                return VERR_NO_MEMORY;
+            }
+            Assert(PhysAddr != UINT64_MAX);
+            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
+
+            pMemSolaris->Core.pv = pvMem;
+            pMemSolaris->pvHandle = NULL;
+            pMemSolaris->fIndivPages = false;
+        }
+        pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
         *ppMem = &pMemSolaris->Core;
         return VINF_SUCCESS;
     }
-    else
-    {
-        /*
-         * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
-         * We fall back to using contig_alloc().
-         */
-        uint64_t PhysAddr = UINT64_MAX;
-        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
-        if (!pvMem)
-        {
-            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
-            rtR0MemObjDelete(&pMemSolaris->Core);
-            return VERR_NO_MEMORY;
-        }
-        Assert(PhysAddr != UINT64_MAX);
-        Assert(!(PhysAddr & PAGE_OFFSET_MASK));
-
-        pMemSolaris->Core.pv = pvMem;
-        pMemSolaris->pvHandle = NULL;
-        pMemSolaris->fIndivPages = false;
-        *ppMem = &pMemSolaris->Core;
-        return VINF_SUCCESS;
-    }
+    return VERR_NO_MEMORY;

 #else /* 32 bit: */
@@ -798 +801 @@
     {
         AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
+        pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /*?*/
         pMemSolaris->Core.pv = NULL;
         pMemSolaris->Core.u.Phys.PhysBase = PhysAddr;
@@ -822 +826 @@
         Assert(PhysAddr + cb <= PhysHighest);

+        pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
         pMemSolaris->Core.pv = pvMem;
         pMemSolaris->Core.u.Phys.PhysBase = PhysAddr;