Changeset 27673 in vbox for trunk/src/VBox/Runtime/r0drv/solaris/vbi
- Timestamp: Mar 24, 2010 3:57:38 PM (15 years ago)
- Location: trunk/src/VBox/Runtime/r0drv/solaris/vbi
- Files: 3 edited
Legend: Unmodified, Added, Removed
trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/os/vbi.c
r27010 r27673 113 113 #if 0 114 114 static struct modlmisc vbi_modlmisc = { 115 &mod_miscops, "VirtualBox Interfaces V 6"115 &mod_miscops, "VirtualBox Interfaces V8" 116 116 }; 117 117 … … 1204 1204 1205 1205 /* 1206 * This is revision 5 of the interface. As more functions are added, 1207 * they should go after this point in the file and the revision level 1208 * increased. Also change vbi_modlmisc at the top of the file. 1209 */ 1210 uint_t vbi_revision_level = 7; 1206 * This is revision 5 of the interface. 1207 */ 1211 1208 1212 1209 void * … … 1250 1247 } 1251 1248 1249 1250 /* 1251 * This is revision 8 of the interface. 1252 */ 1253 1254 page_t ** 1255 vbi_pages_alloc(uint64_t *phys, size_t size) 1256 { 1257 /* 1258 * the page freelist and cachelist both hold pages that are not mapped into any address space. 1259 * the cachelist is not really free pages but when memory is exhausted they'll be moved to the 1260 * free lists. 1261 * it's the total of the free+cache list that we see on the 'free' column in vmstat. 
1262 */ 1263 page_t **pp_pages = NULL; 1264 pgcnt_t npages = (size + PAGESIZE - 1) >> PAGESHIFT; 1265 1266 /* reserve available memory for pages */ 1267 int rc = page_resv(npages, KM_NOSLEEP); 1268 if (rc) 1269 { 1270 /* create the pages */ 1271 rc = page_create_wait(npages, 0 /* flags */); 1272 if (rc) 1273 { 1274 /* alloc space for page_t pointer array */ 1275 size_t pp_size = npages * sizeof(page_t *); 1276 pp_pages = kmem_zalloc(pp_size, KM_SLEEP); 1277 if (pp_pages) 1278 { 1279 /* get pages from kseg, the 'virtAddr' here is only for colouring (optimizing */ 1280 seg_t kernseg; 1281 kernseg.s_as = &kas; 1282 caddr_t virtAddr = NULL; 1283 for (pgcnt_t i = 0; i < npages; i++, virtAddr += PAGESIZE) 1284 { 1285 /* get a page from the freelist */ 1286 page_t *ppage = page_get_freelist(&kvp, 0 /* offset */, &kernseg, virtAddr, 1287 PAGESIZE, 0 /* flags */, NULL /* local group */); 1288 if (!ppage) 1289 { 1290 /* try from the cachelist */ 1291 ppage = page_get_cachelist(&kvp, 0 /* offset */, &kernseg, virtAddr, 1292 0 /* flags */, NULL /* local group */); 1293 if (!ppage) 1294 { 1295 /* damn */ 1296 page_create_putback(npages - i); 1297 while (--i >= 0) 1298 page_free(pp_pages[i], 0 /* don't need, move to tail */); 1299 kmem_free(pp_pages, pp_size); 1300 page_unresv(npages); 1301 return NULL; 1302 } 1303 1304 /* remove association with the vnode for pages from the cachelist */ 1305 if (!PP_ISAGED(ppage)) 1306 page_hashout(ppage, NULL /* mutex */); 1307 } 1308 1309 PP_CLRFREE(ppage); 1310 PP_CLRAGED(ppage); 1311 pp_pages[i] = ppage; 1312 } 1313 1314 /* 1315 * we now have the pages locked exclusively, before they are mapped in 1316 * we must downgrade the lock. 
1317 */ 1318 *phys = (uint64_t)page_pptonum(pp_pages[0]) << PAGESHIFT; 1319 return pp_pages; 1320 } 1321 1322 page_create_putback(npages); 1323 } 1324 1325 page_unresv(npages); 1326 } 1327 1328 return NULL; 1329 } 1330 1331 1332 void 1333 vbi_pages_free(page_t **pp_pages, size_t size) 1334 { 1335 pgcnt_t npages = (size + PAGESIZE - 1) >> PAGESHIFT; 1336 size_t pp_size = npages * sizeof(page_t *); 1337 for (pgcnt_t i = 0; i < npages; i++) 1338 { 1339 /* we need to exclusive lock the pages before freeing them */ 1340 int rc = page_tryupgrade(pp_pages[i]); 1341 if (!rc) 1342 { 1343 page_unlock(pp_pages[i]); 1344 while (!page_lock(pp_pages[i], SE_EXCL, NULL /* mutex */, P_RECLAIM)) 1345 ; 1346 } 1347 1348 page_free(pp_pages[i], 0 /* don't need, move to tail */); 1349 } 1350 1351 kmem_free(pp_pages, pp_size); 1352 page_unresv(npages); 1353 } 1354 1355 1356 int 1357 vbi_pages_premap(page_t **pp_pages, size_t size, uint64_t *pphysaddrs) 1358 { 1359 if (!pphysaddrs) 1360 return -1; 1361 1362 pgcnt_t npages = (size + PAGESIZE - 1) >> PAGESHIFT; 1363 for (pgcnt_t i = 0; i < npages; i++) 1364 { 1365 /* 1366 * prepare pages for mapping into kernel/user space, we need to 1367 * downgrade the exclusive page lock to a shared lock. 1368 */ 1369 page_downgrade(pp_pages[i]); 1370 pphysaddrs[i] = vbi_page_to_pa(pp_pages, i); 1371 } 1372 1373 return 0; 1374 } 1375 1376 1377 uint64_t 1378 vbi_page_to_pa(page_t **pp_pages, pgcnt_t i) 1379 { 1380 pfn_t pfn = page_pptonum(pp_pages[i]); 1381 if (pfn == PFN_INVALID) 1382 panic("vbi_page_to_pa: page_pptonum() failed\n"); 1383 return (uint64_t)pfn << PAGESHIFT; 1384 } 1385 1386 /* 1387 * As more functions are added, they should start with a comment indicating 1388 * the revision and above this point in the file and the revision level should 1389 * be increased. Also change vbi_modlmisc at the top of the file. 1390 */ 1391 uint_t vbi_revision_level = 8; 1392 -
trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/sys/vbi.h
r27003 r27673 351 351 /* end of interfaces defined for version 7 */ 352 352 353 /* begin interfaces defined for version 8 */ 354 355 /* 356 * Allocate pages from the free/cache list without creating 357 * any kernel mapping of the memory. 358 * 359 * return value is a) NULL if memory unavailable or 360 * b) an allocated array of page_t structures to each page allocated. 361 * 362 * phys on input is set to the physical address of the first page allocated. 363 * 364 * size is the amount to allocate and must be a multiple of PAGESIZE 365 */ 366 extern page_t **vbi_pages_alloc(uint64_t *phys, size_t size); 367 368 /* 369 * Free pages allocated using vbi_pages_alloc() 370 */ 371 extern void vbi_pages_free(page_t **pp_pages, size_t size); 372 373 /* 374 * Prepare pages allocated via vbi_pages_alloc() to be mapped into 375 * user or kernel space. 376 * 377 * return value is 0 on success, non-zero on failure. 378 * 379 * size is the amount allocated from which number of pages in the page array 380 * will be computed. 381 * 382 * physaddrs on input is filled with the physical address of each corresponding page 383 * that can be mapped in. Size of the array pointed to by physaddrs must correspond 384 * to size. 385 */ 386 extern int vbi_pages_premap(page_t **pp_pages, size_t size, uint64_t *physaddrs); 387 388 /* 389 * Returns the physical address for the 'i'th page in the array of page 390 * structures in 'pp_pages' 391 */ 392 extern uint64_t vbi_page_to_pa(page_t **pp_pages, pgcnt_t i); 393 /* end of interfaces defined for version 8 */ 394 353 395 #ifdef __cplusplus 354 396 } -
trunk/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c
r27414 r27673 83 83 #if 0 84 84 vbi_phys_free(pMemSolaris->Core.pv, pMemSolaris->Core.cb); 85 #else 86 if (pMemSolaris->Core.u.Phys.fAllocated == true) 87 ddi_umem_free(pMemSolaris->Cookie); 88 else 89 vbi_pages_free(pMemSolaris->pvHandle, pMemSolaris->Core.cb); 85 90 #endif 86 ddi_umem_free(pMemSolaris->Cookie);87 91 break; 88 92 … … 183 187 * The contig_alloc() way of allocating NC pages is broken or does not match our semantics. Refer #4716 for details. 184 188 */ 185 caddr_t virtAddr = vbi_phys_alloc(&physAddr, cb, PAGE_SIZE, 0 /* non-contiguous */); 186 #endif 189 /* caddr_t virtAddr = vbi_phys_alloc(&physAddr, cb, PAGE_SIZE, 0 /* non-contiguous */); */ 187 190 caddr_t virtAddr = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie); 188 191 if (RT_UNLIKELY(virtAddr == NULL)) … … 191 194 return VERR_NO_MEMORY; 192 195 } 193 Assert(!(physAddr & PAGE_OFFSET_MASK));194 196 pMemSolaris->Core.pv = virtAddr; 195 197 pMemSolaris->Core.u.Phys.PhysBase = physAddr; 196 198 pMemSolaris->Core.u.Phys.fAllocated = true; 197 199 pMemSolaris->pvHandle = NULL; 200 #else 201 void *pvPages = vbi_pages_alloc(&physAddr, cb); 202 if (!pvPages) 203 { 204 LogRel(("rtR0MemObjNativeAllocPhysNC: vbi_pages_alloc failed.\n")); 205 rtR0MemObjDelete(&pMemSolaris->Core); 206 return VERR_NO_MEMORY; 207 } 208 Assert(!(physAddr & PAGE_OFFSET_MASK)); 209 pMemSolaris->Core.pv = NULL; 210 pMemSolaris->Core.u.Phys.PhysBase = physAddr; 211 pMemSolaris->Core.u.Phys.fAllocated = false; 212 pMemSolaris->pvHandle = pvPages; 213 #endif 214 198 215 *ppMem = &pMemSolaris->Core; 199 216 return VINF_SUCCESS; … … 387 404 /* Create the mapping object */ 388 405 PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb); 389 if ( !pMemSolaris)406 if (RT_UNLIKELY(!pMemSolaris)) 390 407 return VERR_NO_MEMORY; 391 408 392 409 uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP); 393 for (pgcnt_t iPage = 0; iPage < cPages; iPage++) 
394 { 395 paPhysAddrs[iPage] = vbi_va_to_pa(pv); 396 if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1)) 410 if (RT_UNLIKELY(!paPhysAddrs)) 411 return VERR_NO_MEMORY; 412 413 if ( pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC 414 && pMemSolaris->Core.u.Phys.fAllocated == false) 415 { 416 /* 417 * The PhysNC object has no kernel mapping backing it. The call to vbi_pages_premap() 418 * prepares the physical pages to be mapped into user or kernel space. 419 */ 420 LogRel(("calling premap\n")); 421 int rc = vbi_pages_premap(pMemToMapSolaris->pvHandle, cb, paPhysAddrs); 422 if (rc) 397 423 { 398 LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));424 LogRel(("rtR0MemObjNativeMapUser: vbi_pages_premap failed. rc=%d\n", rc)); 399 425 kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages); 400 426 rtR0MemObjDelete(&pMemSolaris->Core); 401 427 return VERR_MAP_FAILED; 402 428 } 403 pv = (void *)((uintptr_t)pv + PAGE_SIZE); 429 } 430 else 431 { 432 /* 433 * All other memory object types have allocated memory with kernel mappings. 434 */ 435 for (pgcnt_t iPage = 0; iPage < cPages; iPage++) 436 { 437 paPhysAddrs[iPage] = vbi_va_to_pa(pv); 438 if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1)) 439 { 440 LogRel(("rtR0MemObjNativeMapUser: no page to map.\n")); 441 kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages); 442 rtR0MemObjDelete(&pMemSolaris->Core); 443 return VERR_MAP_FAILED; 444 } 445 pv = (void *)((uintptr_t)pv + PAGE_SIZE); 446 } 404 447 } 405 448 … … 465 508 return vbi_va_to_pa(pb); 466 509 } 467 return pMemSolaris->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);510 return vbi_page_to_pa(pMemSolaris->pvHandle, iPage); 468 511 469 512 case RTR0MEMOBJTYPE_RES_VIRT:
Note: See TracChangeset for help on using the changeset viewer.