Changeset 4474 in vbox for trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
- Timestamp: Aug 31, 2007 7:23:49 PM
- File: 1 edited
Legend:
- Unmodified lines carry no marker
- Added lines are prefixed with +
- Removed lines are prefixed with -
- … marks skipped, unchanged lines
trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
r4287 → r4474

  } RTR0MEMOBJSOLARIS, *PRTR0MEMOBJSOLARIS;

- /*******************************************************************************
- *   Internal Functions                                                         *
- *******************************************************************************/
+ /**
+  * Used for supplying the solaris kernel info. about memory limits
+  * during contiguous allocations (i_ddi_mem_alloc)
+  */
+ struct ddi_dma_attr g_SolarisX86PhysMemLimits =
+ {
+     DMA_ATTR_V0,            /* Version Number */
+     (uint64_t)0,            /* lower limit */
+     (uint64_t)0xffffffff,   /* high limit (32-bit PA, 4G) */
+     (uint64_t)0xffffffff,   /* counter limit */
+     (uint64_t)PAGE_SIZE,    /* alignment */
+     (uint64_t)PAGE_SIZE,    /* burst size */
+     (uint64_t)PAGE_SIZE,    /* effective DMA size */
+     (uint64_t)0xffffffff,   /* max DMA xfer size */
+     (uint64_t)0xffffffff,   /* segment boundary */
+     1,                      /* scatter-gather list length (1 for contiguous) */
+     1,                      /* device granularity */
+     0                       /* bus-specific flags */
+ };
+
+
+ static uint64_t rtR0MemObjSolarisVirtToPhys(struct hat* hatSpace, caddr_t virtAddr)
+ {
+     /* We could use paddr_t (more solaris-like) rather than uint64_t but paddr_t isn't defined for 64-bit */
+     pfn_t pfn = hat_getpfnum(hatSpace, virtAddr);
+     if (pfn == PFN_INVALID)
+     {
+         AssertMsgFailed(("rtR0MemObjSolarisVirtToPhys: hat_getpfnum for %p failed.\n", virtAddr));
+         return PFN_INVALID;
+     }
+
+     /* Both works, but second will work for non-page aligned virtAddr */
+ #if 0
+     uint64_t physAddr = PAGE_SIZE * pfn;
+ #else
+     uint64_t physAddr = ((uint64_t)pfn << MMU_PAGESHIFT) | ((uint64_t)virtAddr & MMU_PAGEOFFSET);
+ #endif
+     return physAddr;
+ }

…
      {
          case RTR0MEMOBJTYPE_CONT:
-             ddi_mem_free(pMemSolaris->Core.pv);
+             i_ddi_mem_free(pMemSolaris->Core.pv, NULL);
              break;

          case RTR0MEMOBJTYPE_PAGE:
- #if 0
-             ddi_umem_free(pMemSolaris->Cookie);
- #endif
-             ddi_mem_free(pMemSolaris->Core.pv);
+             kmem_free(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
              break;

          case RTR0MEMOBJTYPE_LOCK:
          {
+             cmn_err(CE_NOTE, "rtR0MemObjNativeFree: LOCK\n");
              struct as* addrSpace;
              if (pMemSolaris->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
…
          case RTR0MEMOBJTYPE_MAPPING:
          {
+             struct hat* hatSpace;
+             struct as* addrSpace;
+             cmn_err(CE_NOTE, "rtR0MemObjNativeFree: MAPPING\n");
              if (pMemSolaris->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
              {
                  /* Kernel process */
-                 hat_unload(kas.a_hat, (caddr_t)pMemSolaris->Core.pv, pMemSolaris->Core.cb, HAT_UNLOAD_UNLOCK);
-                 vmem_xfree(heap32_arena, (caddr_t)pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+                 cmn_err(CE_NOTE, "rtR0MemObjNativeFree: MAPPING: kernel\n");
+                 hatSpace = kas.a_hat;
+                 addrSpace = &kas;
              }
              else
              {
                  /* User process */
-                 proc_t *p = (proc_t *)pMemSolaris->Core.u.Mapping.R0Process;
-                 struct as *useras = p->p_as;
-                 hat_unload(useras->a_hat, (caddr_t)pMemSolaris->Core.pv, pMemSolaris->Core.cb, HAT_UNLOAD_UNLOCK);
+                 cmn_err(CE_NOTE, "rtR0MemObjNativeFree: MAPPING: userProcess\n");
+                 proc_t *userProc = (proc_t *)pMemSolaris->Core.u.Mapping.R0Process;
+                 hatSpace = userProc->p_as->a_hat;
+                 addrSpace = userProc->p_as;
              }

+             hat_unload(hatSpace, pMemSolaris->Core.pv, pMemSolaris->Core.cb, HAT_UNLOAD_UNLOCK);
+             as_unmap(addrSpace, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+             cmn_err(CE_NOTE, "rtR0MemObjNativeFree: MAPPING: removed fine\n");
              break;
          }

          /* unused */
          case RTR0MEMOBJTYPE_LOW:
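
The physical address computed by the new rtR0MemObjSolarisVirtToPhys helper above is simply the page frame number shifted up by the page shift, with the byte offset inside the page OR'ed back in. A minimal user-land sketch of that arithmetic, assuming 4 KB pages (MMU_PAGESHIFT = 12) and made-up example values; it illustrates only the math, it is not code from the changeset:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_PAGESHIFT   12                                /* assumed 4 KB pages */
    #define EXAMPLE_PAGEOFFSET  ((1ULL << EXAMPLE_PAGESHIFT) - 1) /* low 12 bits */

    int main(void)
    {
        uint64_t pfn      = 0x12345;      /* hypothetical page frame number from the HAT */
        uint64_t virtAddr = 0xd6002345;   /* hypothetical, not page aligned */

        /* Same composition as the helper: pfn in the high bits, page offset in the low bits. */
        uint64_t physAddr = (pfn << EXAMPLE_PAGESHIFT) | (virtAddr & EXAMPLE_PAGEOFFSET);

        printf("physAddr=%#llx\n", (unsigned long long)physAddr); /* prints 0x12345345 */
        return 0;
    }
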
…
      if (!pMemSolaris)
          return VERR_NO_MEMORY;
- #if 1
-     /* Allocate physically contiguous page-aligned memory. */
-     caddr_t virtAddr;
-     int rc = i_ddi_mem_alloc(NULL, &g_SolarisX86PhysMemLimits, cb, 1, 0, NULL, &virtAddr, NULL, NULL);
-     if (rc != DDI_SUCCESS)
+
+     /** @todo r=bird: The man page says: "The allocated memory is at least double-word aligned, so it can hold any C data structure. No greater alignment can be assumed." */
+     void* virtAddr = kmem_alloc(cb, KM_SLEEP);
+     if (!virtAddr)
      {
          rtR0MemObjDelete(&pMemSolaris->Core);
…
      pMemSolaris->Core.pv = virtAddr;
-     pMemSolaris->Core.u.Cont.Phys = PAGE_SIZE * hat_getpfnum(kas.a_hat, virtAddr);
-     *ppMem = &pMemSolaris->Core;
-     cmn_err(CE_NOTE, "xAllocPage success physAddr=%p virt=%p\n", PAGE_SIZE * hat_getpfnum(kas.a_hat, virtAddr), virtAddr);
- #endif
- #if 0
-     /* Allocate page-aligned kernel memory */
-     void *pv = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
-     if (pv == NULL)
-     {
-         rtR0MemObjDelete(&pMemSolaris->Core);
-         return VERR_NO_MEMORY;
-     }
-
-     pMemSolaris->Core.pv = pv;
-     *ppMem = &pMemSolaris->Core;
-     cmn_err(CE_NOTE, "ddi_umem_alloc, success\n");
- #endif
+     pMemSolaris->ppShadowPages = NULL;
+     *ppMem = &pMemSolaris->Core;
      return VINF_SUCCESS;
  }

…
          }
      }
-
      return rc;
  }

…
          return VERR_NO_MEMORY;
      }

      pMemSolaris->Core.pv = virtAddr;
-     pMemSolaris->Core.u.Cont.Phys = PAGE_SIZE * hat_getpfnum(kas.a_hat, virtAddr);
+     pMemSolaris->Core.u.Cont.Phys = rtR0MemObjSolarisVirtToPhys(kas.a_hat, virtAddr);
+     pMemSolaris->ppShadowPages = NULL;
      *ppMem = &pMemSolaris->Core;
      return VINF_SUCCESS;

…
  int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
  {
-     /** @todo rtR0MemObjNativeAllocPhys / solaris */
-     return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest);
+     /** @todo rtR0MemObjNativeAllocPhysNC / solaris */
+     return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
  }

…
          return VERR_NO_MEMORY;

-     /* @todo validate Phys as a proper physical address */
-
      /* There is no allocation here, it needs to be mapped somewhere first */
      pMemSolaris->Core.u.Phys.fAllocated = false;
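
The contiguous-allocation path (RTR0MEMOBJTYPE_CONT) keeps the i_ddi_mem_alloc/i_ddi_mem_free pair together with the g_SolarisX86PhysMemLimits attributes defined at the top of the file. i_ddi_mem_alloc is a private DDI interface, so the following is only a hedged sketch that mirrors the call shapes visible in this diff; the helper name and the error handling around it are illustrative, not code from the file:

    /* Illustrative only: allocate cb bytes of physically contiguous memory below 4G
     * the way this file does it, look up its physical address, then free it again. */
    static int exampleAllocContBelow4G(size_t cb)
    {
        caddr_t virtAddr;
        int rc = i_ddi_mem_alloc(NULL, &g_SolarisX86PhysMemLimits, cb, 1, 0, NULL,
                                 &virtAddr, NULL, NULL);
        if (rc != DDI_SUCCESS)
            return VERR_NO_MEMORY;

        uint64_t physAddr = rtR0MemObjSolarisVirtToPhys(kas.a_hat, virtAddr);
        NOREF(physAddr);    /* a real caller would record this, e.g. in u.Cont.Phys */

        i_ddi_mem_free(virtAddr, NULL);
        return VINF_SUCCESS;
    }
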
…
  {
      /* Create the object */
-     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
-     if (!pMemSolaris)
-         return VERR_NO_MEMORY;
-
-     proc_t *userProcess = curproc;
+     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+     if (!pMemSolaris)
+         return VERR_NO_MEMORY;
+
+     proc_t *userproc = curproc;
      if (R0Process != NIL_RTR0PROCESS)
-         userProcess = (proc_t *)R0Process;
-
-     struct as* userAddrSpace = userProcess->p_as;
-     caddr_t userAddr = (caddr_t)((uintptr_t)R3Ptr & (uintptr_t)PAGEMASK);
+         userproc = (proc_t *)R0Process;
+
+     struct as *useras = userproc->p_as;
      page_t **ppl;

-     int rc = as_pagelock(userAddrSpace, &ppl, userAddr, cb, S_WRITE);
+     int rc = as_pagelock(useras, &ppl, (caddr_t)R3Ptr, cb, S_WRITE);
      if (rc != 0)
-         return VERR_NO_MEMORY;
-
-     pMemSolaris->Core.u.Lock.R0Process = (RTR0PROCESS)userProcess;
+     {
+         cmn_err(CE_NOTE, "rtR0MemObjNativeLockUser: as_pagelock failed rc=%d\n", rc);
+         return VERR_NO_MEMORY;
+     }
+
+     if (!ppl)
+     {
+         as_pageunlock(useras, ppl, (caddr_t)R3Ptr, cb, S_WRITE);
+         cmn_err(CE_NOTE, "rtR0MemObjNativeLockUser: as_pagelock failed to get shadow pages\n");
+         return VERR_NO_MEMORY;
+     }
+
+     pMemSolaris->Core.u.Lock.R0Process = (RTR0PROCESS)userproc;
      pMemSolaris->ppShadowPages = ppl;
      *ppMem = &pMemSolaris->Core;
-
-     return VINF_SUCCESS;
-
- #if 0
-     /* Lock down the physical pages of current process' virtual address space */
-     int rc = ddi_umem_lock(pv, cb, DDI_UMEMLOCK_WRITE, &pMemSolaris->Cookie);
-     if (rc != 0)
-     {
-         rtR0MemObjDelete(&pMemSolaris->Core);
-         return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for Solaris. */
-     }
-
-     pMemSolaris->Core.u.Lock.R0Process = R0Process;
-     *ppMem = &pMemSolaris->Core;
-     return VINF_SUCCESS;
- #endif
+     return VINF_SUCCESS;
  }

…
          return VERR_NO_MEMORY;

-     caddr_t userAddr = (caddr_t)((uintptr_t)pv & (uintptr_t)PAGEMASK);
+     caddr_t virtAddr = (caddr_t)((uintptr_t)pv & (uintptr_t)PAGEMASK);
      page_t **ppl;

-     int rc = as_pagelock(&kas, &ppl, userAddr, cb, S_WRITE);
+     int rc = as_pagelock(&kas, &ppl, virtAddr, cb, S_WRITE);
      if (rc != 0)
          return VERR_NO_MEMORY;
+
+     if (!ppl)
+     {
+         as_pageunlock(&kas, ppl, virtAddr, cb, S_WRITE);
+         cmn_err(CE_NOTE, "rtR0MemObjNativeLockUser: failed to get shadow pages\n");
+         return VERR_NO_MEMORY;
+     }

      pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS; /* means kernel, see rtR0MemObjNativeFree() */
      pMemSolaris->ppShadowPages = ppl;
      *ppMem = &pMemSolaris->Core;
-
      return VINF_SUCCESS;
  }
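
The shadow page list that as_pagelock() hands back is stored in ppShadowPages and has to be returned to as_pageunlock() when the lock object is freed; that part of rtR0MemObjNativeFree sits behind the '…' in the hunk further up. A hedged sketch of what that release step generally looks like (the helper name is illustrative, and addrSpace would be &kas for kernel locks or the locking process' p_as otherwise):

    /* Illustrative only: the release side that pairs with the as_pagelock() calls above. */
    static void exampleUnlockPages(struct as *addrSpace, PRTR0MEMOBJSOLARIS pMemSolaris)
    {
        if (pMemSolaris->ppShadowPages)
            as_pageunlock(addrSpace, pMemSolaris->ppShadowPages,
                          (caddr_t)pMemSolaris->Core.pv, pMemSolaris->Core.cb, S_WRITE);
    }
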
…
  {
      PRTR0MEMOBJSOLARIS pMemToMapSolaris = (PRTR0MEMOBJSOLARIS)pMemToMap;
-     size_t size = P2ROUNDUP(pMemToMapSolaris->Core.cb, PAGE_SIZE);
-     void* pv = pMemToMapSolaris->Core.pv;
-
-     void* kernVirtAddr = vmem_xalloc(heap32_arena, size, PAGE_SIZE, 0, PAGE_SIZE, NULL, 0, VM_SLEEP);
-     if (kernVirtAddr == NULL)
-         return VERR_NO_MEMORY;
-
-     hat_devload(kas.a_hat, (caddr_t)kernVirtAddr, size, hat_getpfnum(kas.a_hat, pv), PROT_READ | PROT_WRITE | PROT_EXEC,
-                 HAT_STRICTORDER | HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
-
+     size_t size = P2ROUNDUP(pMemToMapSolaris->Core.cb, PAGE_SIZE);   /* r=bird: not necessary, see the specs / caller implementation. */
+     void *pv = pMemToMapSolaris->Core.pv;
+     pgcnt_t cPages = btop(size);
+     pgcnt_t iPage;
+     caddr_t addr;
+     int rc;
+
      /* Create the mapping object */
-     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING,
-                                                                        kernVirtAddr, pMemToMapSolaris->Core.cb);
-     if (pMemSolaris == NULL)
-     {
-         hat_unload(kas.a_hat, (caddr_t)kernVirtAddr, size, HAT_UNLOAD_UNLOCK);
-         vmem_xfree(heap32_arena, kernVirtAddr, size);
-         return VERR_NO_MEMORY;
-     }
-
-     pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS; /* NIL_RTR0PROCESS means kernel process */
-     *ppMem = &pMemSolaris->Core;
+     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, size);
+     if (!pMemSolaris)
+         return VERR_NO_MEMORY;
+
+     as_rangelock(&kas);
+     if (pvFixed != (void *)-1)
+     {
+         /* Use user specified address */
+         addr = (caddr_t)pvFixed;
+
+         /* Blow away any previous mapping */
+         as_unmap(&kas, addr, size);
+     }
+     else
+     {
+         /* Let the system choose an address */
+         map_addr(&addr, size, 0, 1, MAP_SHARED | MAP_ANONYMOUS);
+         if (addr == NULL)
+         {
+             as_rangeunlock(&kas);
+             cmn_err(CE_NOTE, "rtR0MemObjNativeMapKernel: map_addr failed\n");
+             return VERR_NO_MEMORY;
+         }
+         /** @todo r=bird: check address against uAlignment, just fail if it's not matching. */
+     }
+
+     /* Our protection masks are identical to <sys/mman.h> but we
+      * need to add PROT_USER for the pages to be accessible by user
+      */
+     struct segvn_crargs crArgs = SEGVN_ZFOD_ARGS(fProt | PROT_USER, PROT_ALL);
+     rc = as_map(&kas, addr, size, segvn_create, &crArgs);
+     as_rangeunlock(&kas);
+     if (rc != 0)
+     {
+         cmn_err(CE_NOTE, "rtR0MemObjNativeMapKernel: as_map failure.\n");
+         return VERR_NO_MEMORY;
+     }
+
+     /* Map each page into kernel space */
+     caddr_t kernAddr = pv;
+     caddr_t pageAddr = addr;
+     for (iPage = 0; iPage < cPages; iPage++)
+     {
+         page_t *pp = page_numtopp_nolock(hat_getpfnum(kas.a_hat, kernAddr));
+         hat_memload(kas.a_hat, pageAddr, pp, (fProt | PROT_USER), HAT_LOAD_LOCK);
+         pageAddr += ptob(1);
+         kernAddr += ptob(1);
+     }
+
+     pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS; /* means kernel */
+     pMemSolaris->Core.pv = addr;
+     *ppMem = &pMemSolaris->Core;
+     cmn_err(CE_NOTE, "done rtR0MemObjNativeMapKernel: Core.pv=%p\n", addr);
      return VINF_SUCCESS;
  }
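
Both mapping functions carry a @todo asking that an address picked by map_addr() be checked against uAlignment. A minimal sketch of such a check, assuming uAlignment is a power of two and that map_addr() already returns page-aligned addresses (illustrative only, not part of the changeset):

    /* Illustrative only: true if addr satisfies a power-of-two alignment request. */
    static bool exampleIsAligned(caddr_t addr, size_t uAlignment)
    {
        return uAlignment <= PAGE_SIZE
            || ((uintptr_t)addr & (uAlignment - 1)) == 0;
    }
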
…
  {
      PRTR0MEMOBJSOLARIS pMemToMapSolaris = (PRTR0MEMOBJSOLARIS)pMemToMap;
      size_t size = P2ROUNDUP(pMemToMapSolaris->Core.cb, PAGE_SIZE);   /** @todo r=bird: this isn't necessary, see the specs. */
      proc_t *userproc = (proc_t *)R0Process;
      struct as *useras = userproc->p_as;
      void *pv = pMemToMapSolaris->Core.pv;
-     pfn_t pfnum = hat_getpfnum(kas.a_hat, pv);
+     pgcnt_t cPages = btop(size);
+     pgcnt_t iPage;
+     caddr_t addr;
      int rc;
-
-     void* kernVirtAddr = vmem_xalloc(heap32_arena, size, PAGE_SIZE, 0, PAGE_SIZE, NULL, 0, VM_SLEEP);
-     if (kernVirtAddr == NULL)
-         return VERR_NO_MEMORY;
-
-     cmn_err(CE_NOTE, "vmem_xalloc successful.\n");
-
-     /* Wrong ones to use: as_map() */
-     hat_devload(kas.a_hat, (caddr_t)kernVirtAddr, size, pfnum, PROT_READ | PROT_WRITE | PROT_EXEC,
-                 HAT_STRICTORDER | HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
-
-     cmn_err(CE_NOTE, "hat_devload successful.\n");

      /* Create the mapping object */
-     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING,
-                                                                        pv, pMemToMapSolaris->Core.cb);
-     if (pMemSolaris == NULL)
-     {
-         /* @todo cleanup */
-         return VERR_NO_MEMORY;
-     }
-
-     pMemSolaris->Core.u.Mapping.R0Process = R0Process;
-     *ppMem = &pMemSolaris->Core;
+     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, size);
+     if (!pMemSolaris)
+         return VERR_NO_MEMORY;
+
+     as_rangelock(useras);
+     if (R3PtrFixed != (RTR3PTR)-1)
+     {
+         /* Use user specified address */
+         addr = (caddr_t)R3PtrFixed;
+
+         /* Verify user address (a bit paranoid) */
+         rc = valid_usr_range(addr, size, fProt, useras, (caddr_t)USERLIMIT32);
+         if (rc != RANGE_OKAY)
+         {
+             as_rangeunlock(useras);
+             cmn_err(CE_NOTE, "rtR0MemObjNativeMapUser: valid_usr_range failed, returned %d\n", rc);
+             return VERR_INVALID_POINTER;
+         }
+
+         /* Blow away any previous mapping */
+         as_unmap(useras, addr, size);
+     }
+     else
+     {
+         /* Let the system choose an address */
+         map_addr(&addr, size, 0, 1, MAP_SHARED | MAP_ANONYMOUS);
+         if (addr == NULL)
+         {
+             as_rangeunlock(useras);
+             cmn_err(CE_NOTE, "rtR0MemObjNativeMapUser: map_addr failed\n");
+             return VERR_NO_MEMORY;
+         }
+
+         /** @todo r=bird: check address against uAlignment, just fail if it's not matching. */
+     }
+
+     /* Our protection masks are identical to <sys/mman.h> but we
+      * need to add PROT_USER for the pages to be accessible by user
+      */
+     struct segvn_crargs crArgs = SEGVN_ZFOD_ARGS(fProt | PROT_USER, PROT_ALL);
+     rc = as_map(useras, addr, size, segvn_create, &crArgs);
+     as_rangeunlock(useras);
+     if (rc != 0)
+     {
+         cmn_err(CE_NOTE, "rtR0MemObjNativeMapUser: as_map failure.\n");
+         return VERR_NO_MEMORY;
+     }
+
+ #if 0
+     /* Lock down the pages and get the shadow page list
+      * In this case we must as_pageunlock if(ppShadowPages) exists while freeing CONT, PAGE
+      */
+     rc = as_pagelock(&kas, &pMemToMapSolaris->ppShadowPages, pv, size, S_WRITE);
+     if (rc != 0 || pMemToMapSolaris->ppShadowPages == NULL)
+     {
+         cmn_err(CE_NOTE, "rtR0MemObjNativeMapUser: as_pagelock failed\n");
+         as_unmap(useras, addr, size);
+         return VERR_NO_MEMORY;
+     }
+
+     /* Map each page into user space */
+     caddr_t pageAddr = addr;
+     for (iPage = 0; iPage < cPages; iPage++)
+     {
+         hat_memload(useras->a_hat, pageAddr, pMemToMapSolaris->ppShadowPages[iPage], fProt | PROT_USER,
+                     HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_LOAD_LOCK);
+         pageAddr += ptob(1);
+     }
+ #else
+     /* Map each page into user space */
+     caddr_t kernAddr = pv;
+     caddr_t pageAddr = addr;
+     for (iPage = 0; iPage < cPages; iPage++)
+     {
+         page_t *pp = page_numtopp_nolock(hat_getpfnum(kas.a_hat, kernAddr));
+         hat_memload(useras->a_hat, pageAddr, pp, (fProt | PROT_USER), HAT_LOAD_LOCK);
+         pageAddr += ptob(1);
+         kernAddr += ptob(1);
+     }
+ #endif
+
+     pMemSolaris->Core.u.Mapping.R0Process = (RTR0PROCESS)userproc;
+     pMemSolaris->Core.pv = addr;
+     *ppMem = &pMemSolaris->Core;
+     cmn_err(CE_NOTE, "done MemObjNativeMapUser: Core.pv=%p\n", addr);
      return VINF_SUCCESS;
  }
…
      switch (pMemSolaris->Core.enmType)
      {
-         case RTR0MEMOBJTYPE_LOCK:
-         {
-             /* @todo figure this one out */
-             return NIL_RTHCPHYS;
-         }
-
          case RTR0MEMOBJTYPE_PAGE:
          case RTR0MEMOBJTYPE_LOW:
+         case RTR0MEMOBJTYPE_MAPPING:
          {
              uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
-             return PAGE_SIZE * hat_getpfnum(kas.a_hat, pb);
+             return rtR0MemObjSolarisVirtToPhys(kas.a_hat, pb);
+         }
+
+         case RTR0MEMOBJTYPE_LOCK:
+         {
+             struct hat *hatSpace;
+             if (pMemSolaris->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
+             {
+                 /* User */
+                 proc_t *userProc = (proc_t *)pMemSolaris->Core.u.Lock.R0Process;
+                 hatSpace = userProc->p_as->a_hat;
+             }
+             else /* Kernel */
+                 hatSpace = kas.a_hat;
+
+             uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
+             return rtR0MemObjSolarisVirtToPhys(hatSpace, pb);
          }

…
              return pMemSolaris->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

+         case RTR0MEMOBJTYPE_PHYS_NC:
+             AssertFailed(/* not implemented */);
          case RTR0MEMOBJTYPE_RES_VIRT:
-         case RTR0MEMOBJTYPE_MAPPING:
          default:
              return NIL_RTHCPHYS;
      }
  }
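
rtR0MemObjNativeGetPagePhysAddr backs the generic RTR0MemObjGetPagePhysAddr API, so ring-0 code can walk the pages of an object roughly like this (a sketch using the public IPRT API and the file's usual Solaris/IPRT includes; the helper name is illustrative, not code from this file):

    #include <iprt/memobj.h>
    #include <sys/cmn_err.h>

    /* Illustrative only: log the physical address of every page backing hMemObj. */
    static void exampleDumpPhysPages(RTR0MEMOBJ hMemObj)
    {
        size_t cPages = RTR0MemObjSize(hMemObj) >> PAGE_SHIFT;
        size_t iPage;
        for (iPage = 0; iPage < cPages; iPage++)
        {
            RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
            if (HCPhys == NIL_RTHCPHYS)
                break; /* object type without a per-page physical address */
            cmn_err(CE_NOTE, "page %lu -> physAddr=%llx\n",
                    (unsigned long)iPage, (unsigned long long)HCPhys);
        }
    }
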