VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c@57181

Last change on this file since 57181 was 56290, checked in by vboxsync, 10 years ago

IPRT: Updated (C) year.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 39.6 KB
/* $Id: memobj-r0drv-solaris.c 56290 2015-06-09 14:01:31Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Solaris.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/memobj.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"
#include "memobj-r0drv-solaris.h"

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The Solaris version of the memory object structure.
 */
typedef struct RTR0MEMOBJSOL
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to kernel memory cookie. */
    ddi_umem_cookie_t   Cookie;
    /** Shadow locked pages. */
    void               *pvHandle;
    /** Access during locking. */
    int                 fAccess;
    /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS
     *  allocation. */
    bool                fLargePage;
    /** Whether we have individual pages or a kernel-mapped virtual memory
     *  block in an RTR0MEMOBJTYPE_PHYS_NC allocation. */
    bool                fIndivPages;
} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;
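
/*
 * Editor's sketch (illustrative, not part of the upstream file): because Core
 * is the first member of RTR0MEMOBJSOL, the generic IPRT handle used by the
 * rtR0MemObjNative* entry points below can simply be cast to the Solaris
 * variant. The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static PRTR0MEMOBJSOL rtR0MemObjSolFromCore(PRTR0MEMOBJINTERNAL pMem)
{
    /* A pointer to a struct compares equal to a pointer to its first member. */
    return (PRTR0MEMOBJSOL)pMem;
}
#endif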


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
static vnode_t      g_PageVnode;
static kmutex_t     g_OffsetMtx;
static u_offset_t   g_offPage;

static vnode_t      g_LargePageVnode;
static kmutex_t     g_LargePageOffsetMtx;
static u_offset_t   g_offLargePage;
static bool         g_fLargePageNoReloc;


/**
 * Returns the physical address for a virtual address.
 *
 * @param pv        The virtual address.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    struct hat *pHat         = NULL;
    pfn_t       PageFrameNum = 0;
    uintptr_t   uVirtAddr    = (uintptr_t)pv;

    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
    return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
}
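
/*
 * Editor's note (illustrative, not upstream code): the return expression above
 * recombines the page frame number with the byte offset inside the page. The
 * split/recombine round trip can be sanity-checked as below; the helper name
 * is hypothetical.
 */
#if 0 /* illustrative only */
static void rtR0MemObjSolCheckPhysSplit(uint64_t uPhys)
{
    pfn_t    PageFrameNum = (pfn_t)(uPhys >> PAGE_SHIFT);  /* page frame number */
    uint64_t offPage      = uPhys & PAGE_OFFSET_MASK;      /* offset within the page */
    Assert((((uint64_t)PageFrameNum << PAGE_SHIFT) | offPage) == uPhys);
}
#endif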


/**
 * Returns the physical address for a page.
 *
 * @param pPage     Pointer to the page.
 *
 * @returns The physical address for a page.
 */
static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
{
    AssertPtr(pPage);
    pfn_t PageFrameNum = page_pptonum(pPage);
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
    return (uint64_t)PageFrameNum << PAGE_SHIFT;
}


/**
 * Allocates one page.
 *
 * @param virtAddr  The virtual address to which this page may be mapped in
 *                  the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t offPage;
    seg_t      KernelSeg;

    /*
     * 16777215 terabytes of total memory for all VMs or
     * restart 8000 1GB VMs 2147483 times until wraparound!
     */
    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage   = g_offPage;
    mutex_exit(&g_OffsetMtx);

    KernelSeg.s_as = &kas;
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent this page from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared lock
         * to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}
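
/*
 * Editor's note on the headroom comment above (illustrative arithmetic only):
 * the 64-bit vnode offset advances by PAGE_SIZE per allocated page, so it only
 * wraps after 2^64 bytes (~16777216 TB) have been handed out; at 8000 VMs of
 * 1 GB each per round that is 2^64 / (8000 * 2^30) ~= 2147483 rounds.
 */
#if 0 /* illustrative only */
static void rtR0MemObjSolOffsetHeadroom(void)
{
    uint64_t const cbPerRound = UINT64_C(8000) * _1G;   /* 8000 x 1GB VMs. */
    uint64_t const cRounds    = UINT64_MAX / cbPerRound;
    Assert(cRounds >= UINT64_C(2147483));
}
#endif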


/**
 * Destroys an allocated page.
 *
 * @param pPage     Pointer to the page to be destroyed.
 * @remarks This function expects the page in @a pPage to be shared locked.
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need to exclusive lock the pages before freeing them; if upgrading the shared lock to exclusive fails,
     * drop the page lock and look it up from the hash. Record the page offset before we drop the page lock as
     * we cannot touch any page_t members once the lock is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the pages as PG_NORELOC we should always get back the exact same page.
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}


/* Currently not used on 32-bit, define it to shut up gcc. */
#if HC_ARCH_BITS == 64
/**
 * Allocates physical, non-contiguous memory of pages.
 *
 * @param puPhys    Where to store the physical address of the first page.
 *                  Optional, can be NULL.
 * @param cb        The size of the allocation.
 *
 * @return Array of allocated pages, NULL on failure.
 */
static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
{
    /*
     * VM1:
     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     * The cachelist does not really hold free pages, but when memory is exhausted they'll be moved
     * to the free lists; it's the total of the free and cache lists that we see in the 'free'
     * column of vmstat.
     *
     * VM2:
     * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
     */

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        size_t   cbPages = cPages * sizeof(page_t *);
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            /*
             * Get pages from kseg, the 'virtAddr' here is only for colouring but unfortunately
             * we don't yet have the 'virtAddr' to which this memory may be mapped.
             */
            caddr_t virtAddr = 0;
            for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
            {
                /*
                 * Get a page from the free list locked exclusively. The page will be named (hashed in)
                 * and we rely on it during free. The page we get will be shared locked to prevent the page
                 * from being relocated.
                 */
                page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
                if (RT_UNLIKELY(!pPage))
                {
                    /*
                     * No page found, release whatever pages we grabbed so far.
                     */
                    for (size_t k = 0; k < i; k++)
                        rtR0MemObjSolPageDestroy(ppPages[k]);
                    kmem_free(ppPages, cbPages);
                    page_unresv(cPages);
                    return NULL;
                }

                ppPages[i] = pPage;
            }

            if (puPhys)
                *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
            return ppPages;
        }

        page_unresv(cPages);
    }

    return NULL;
}
#endif /* HC_ARCH_BITS == 64 */


/**
 * Frees the allocated pages.
 *
 * @param ppPages   Pointer to the page list.
 * @param cb        Size of the allocation.
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    size_t cPages  = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
        rtR0MemObjSolPageDestroy(ppPages[iPage]);

    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}
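
/*
 * Editor's sketch of the alloc/free pairing (illustrative, not upstream code):
 * rtR0MemObjSolPagesAlloc and rtR0MemObjSolPagesFree must see the same byte
 * count so the page_resv()/page_unresv() accounting and the size of the
 * page_t pointer array match. The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static int rtR0MemObjSolPagesRoundTrip(size_t cb)
{
    uint64_t uPhys   = UINT64_MAX;
    page_t **ppPages = rtR0MemObjSolPagesAlloc(&uPhys, cb);
    if (RT_UNLIKELY(!ppPages))
        return VERR_NO_MEMORY;
    /* ... use the pages, e.g. map them somewhere ... */
    rtR0MemObjSolPagesFree(ppPages, cb);    /* same cb as the allocation */
    return VINF_SUCCESS;
}
#endif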


/**
 * Allocates one large page.
 *
 * @param puPhys        Where to store the physical address of the allocated
 *                      page. Optional, can be NULL.
 * @param cbLargePage   Size of the large page.
 *
 * @returns Pointer to a list of pages that cover the large page, NULL on
 *          failure.
 */
static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
{
    /*
     * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
     * fragmentation on systems that support it.
     */
    static bool fPageNoRelocChecked = false;
    if (fPageNoRelocChecked == false)
    {
        fPageNoRelocChecked = true;
        g_fLargePageNoReloc = false;
        if (   g_pfnrtR0Sol_page_noreloc_supported
            && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
        {
            g_fLargePageNoReloc = true;
        }
    }

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages  = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    u_offset_t offPage = 0;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            mutex_enter(&g_LargePageOffsetMtx);
            AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
            g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
            offPage        = g_offLargePage;
            mutex_exit(&g_LargePageOffsetMtx);

            seg_t KernelSeg;
            KernelSeg.s_as = &kas;
            page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
                                                     PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
                                                     0 /* vaddr */, NULL /* locality group */);
            if (pRootPage)
            {
                /*
                 * Split it into sub-pages, downgrade each page to a shared lock to prevent page relocation.
                 */
                page_t *pPageList = pRootPage;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    page_t *pPage = pPageList;
                    AssertPtr(pPage);
                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
                                                                 (int)pPage->p_szc, (int)pRootPage->p_szc));

                    /*
                     * Lock the page into memory "long term". This prevents callers of page_try_demote_pages() (such as the
                     * pageout scanner) from demoting the large page into smaller pages while we temporarily release the
                     * exclusive lock (during free). We pass "0, 1" since we've already accounted for availrmem during
                     * page_resv().
                     */
                    page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);

                    page_sub(&pPageList, pPage);
                    page_io_unlock(pPage);
                    page_downgrade(pPage);
                    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

                    ppPages[iPage] = pPage;
                }
                Assert(pPageList == NULL);
                Assert(ppPages[0] == pRootPage);

                uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
                AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
                if (puPhys)
                    *puPhys = uPhys;
                return ppPages;
            }

            /*
             * Don't restore the previous offset in case of failure (race condition), we have plenty of offset space.
             * The offset must be unique (for the same vnode) or we'll encounter panics on page_create_va_large().
             */
            kmem_free(ppPages, cbPages);
        }

        page_unresv(cPages);
    }
    return NULL;
}


/**
 * Frees the large page.
 *
 * @param ppPages       Pointer to the list of small pages that cover the
 *                      large page.
 * @param cbLargePage   Size of the allocation (i.e. size of the large
 *                      page).
 */
static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
{
    Assert(ppPages);
    Assert(cbLargePage > PAGE_SIZE);

    bool    fDemoted   = false;
    size_t  cPages     = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t  cbPages    = cPages * sizeof(page_t *);
    page_t *pPageList  = ppPages[0];

    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * We need the pages exclusively locked, try upgrading the shared lock.
         * If it fails, drop the shared page lock (cannot access any page_t members once this is done)
         * and lookup the page from the page hash locking it exclusively.
         */
        page_t    *pPage   = ppPages[iPage];
        u_offset_t offPage = pPage->p_offset;
        int rc = page_tryupgrade(pPage);
        if (!rc)
        {
            page_unlock(pPage);
            page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
            AssertRelease(pFoundPage);

            if (g_fLargePageNoReloc)
            {
                /*
                 * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
                 */
                AssertReleaseMsg(pFoundPage == pPage,
                                 ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
                                  pFoundPage, pPage));
            }

            /*
             * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
             * could possibly demote the large page to _4K pages between our call to page_unlock() and page_lookup().
             */
            if (page_get_pagecnt(pFoundPage->p_szc) == 1)   /* Base size of only _4K associated with this page. */
                fDemoted = true;
            pPage          = pFoundPage;
            ppPages[iPage] = pFoundPage;
        }
        Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
        page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    }

    if (fDemoted)
    {
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
            page_destroy(ppPages[iPage], 0 /* move it to the free list */);
        }
    }
    else
    {
        /*
         * Although we shred the adjacent pages in the linked list, page_destroy_pages works on
         * adjacent pages via array increments. So this does indeed free all the pages.
         */
        AssertPtr(pPageList);
        page_destroy_pages(pPageList);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}
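
/*
 * Editor's sketch (illustrative, not upstream code): a large page returned by
 * rtR0MemObjSolLargePageAlloc is physically contiguous and naturally aligned,
 * so its physical address has no bits set below the large-page size. The
 * helper name is hypothetical.
 */
#if 0 /* illustrative only */
static int rtR0MemObjSolLargePageRoundTrip(size_t cbLargePage)
{
    uint64_t uPhys   = UINT64_MAX;
    page_t **ppPages = rtR0MemObjSolLargePageAlloc(&uPhys, cbLargePage);
    if (RT_UNLIKELY(!ppPages))
        return VERR_NO_MEMORY;
    Assert(!(uPhys & (cbLargePage - 1)));   /* naturally aligned */
    rtR0MemObjSolLargePageFree(ppPages, cbLargePage);
    return VINF_SUCCESS;
}
#endif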


/**
 * Unmaps kernel/user-space mapped memory.
 *
 * @param pv        Pointer to the mapped memory block.
 * @param cb        Size of the memory block.
 */
static void rtR0MemObjSolUnmap(void *pv, size_t cb)
{
    if (SOL_IS_KRNL_ADDR(pv))
    {
        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, pv, cb);
    }
    else
    {
        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
        AssertPtr(pAddrSpace);
        as_rangelock(pAddrSpace);
        as_unmap(pAddrSpace, pv, cb);
        as_rangeunlock(pAddrSpace);
    }
}


/**
 * Lock down memory mappings for a virtual address.
 *
 * @param pv            Pointer to the memory to lock down.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
{
    /*
     * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
     */
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProc);
        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
        if (rc)
        {
            LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
            return VERR_LOCK_FAILED;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Unlock memory mappings for a virtual address.
 *
 * @param pv            Pointer to the locked memory.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 */
static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
{
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProcess);
        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
    }
}
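
/*
 * Editor's sketch (illustrative, not upstream code): rtR0MemObjSolLock pairs
 * F_SOFTLOCK with the F_SOFTUNLOCK in rtR0MemObjSolUnlock, so the unlock must
 * use the same range and access flags as the lock (cf. rtR0MemObjNativeFree,
 * which replays the saved fAccess). The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static int rtR0MemObjSolLockRoundTrip(void *pv, size_t cb)
{
    int rc = rtR0MemObjSolLock(pv, cb, S_WRITE);
    if (RT_FAILURE(rc))
        return rc;
    /* ... access the locked range ... */
    rtR0MemObjSolUnlock(pv, cb, S_WRITE);   /* same range, same access */
    return VINF_SUCCESS;
}
#endif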


/**
 * Maps a list of physical pages into user address space.
 *
 * @param pVirtAddr     Where to store the virtual address of the mapping.
 * @param fPageAccess   Page access rights (PROT_READ, PROT_WRITE,
 *                      PROT_EXEC).
 * @param paPhysAddrs   Array of physical addresses to pages.
 * @param cb            Size of memory being mapped.
 * @param cbPageSize    Size of the pages being mapped.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
{
    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
    int rc = VERR_INTERNAL_ERROR;
    SEGVBOX_CRARGS Args;

    Args.paPhysAddrs = paPhysAddrs;
    Args.fPageAccess = fPageAccess;
    Args.cbPageSize  = cbPageSize;

    as_rangelock(pAddrSpace);
    map_addr(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
    if (*pVirtAddr != NULL)
        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
    else
        rc = ENOMEM;
    as_rangeunlock(pAddrSpace);

    return RTErrConvertFromErrno(rc);
}


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PHYS:
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                if (pMemSolaris->fLargePage)
                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
                else
                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            if (pMemSolaris->fIndivPages)
                rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
            else
                rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            ddi_umem_free(pMemSolaris->Cookie);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            else
                AssertFailed();
            break;
        }

        case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
        default:
            AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_PAGE_MEMORY;
    }

    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}
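
/*
 * Editor's sketch (illustrative, not upstream code): ring-0 callers reach this
 * backend through the public IPRT API declared in iprt/memobj.h; something
 * along these lines ends up in rtR0MemObjNativeAllocPage above.
 */
#if 0 /* illustrative only */
static int rtR0MemObjSolAllocPageExample(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, _64K, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMemObj);  /* the ddi_umem_alloc'ed block */
        /* ... use pv ... */
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif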


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Allocate physically low page-aligned memory. */
    uint64_t uPhysHi = _4G - 1;
    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_LOW_MEMORY;
    }
    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);
    return rtR0MemObjNativeAllocPhys(ppMem, cb, _4G - 1, PAGE_SIZE /* alignment */);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if HC_ARCH_BITS == 64
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    if (PhysHighest == NIL_RTHCPHYS)
    {
        uint64_t PhysAddr = UINT64_MAX;
        void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
        if (!pvPages)
        {
            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
            rtR0MemObjDelete(&pMemSolaris->Core);
            return VERR_NO_MEMORY;
        }
        Assert(PhysAddr != UINT64_MAX);
        Assert(!(PhysAddr & PAGE_OFFSET_MASK));

        pMemSolaris->Core.pv     = NULL;
        pMemSolaris->pvHandle    = pvPages;
        pMemSolaris->fIndivPages = true;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }
    else
    {
        /*
         * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
         * We fall back to using contig_alloc().
         */
        uint64_t PhysAddr = UINT64_MAX;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
        if (!pvMem)
        {
            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
            rtR0MemObjDelete(&pMemSolaris->Core);
            return VERR_NO_MEMORY;
        }
        Assert(PhysAddr != UINT64_MAX);
        Assert(!(PhysAddr & PAGE_OFFSET_MASK));

        pMemSolaris->Core.pv     = pvMem;
        pMemSolaris->pvHandle    = NULL;
        pMemSolaris->fIndivPages = false;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }

#else /* 32 bit: */
    return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
#endif
}
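
/*
 * Editor's sketch (illustrative, not upstream code): with PhysHighest ==
 * NIL_RTHCPHYS a PHYS_NC object holds individual pages and no kernel mapping,
 * so callers query page addresses through RTR0MemObjGetPagePhysAddr (declared
 * in iprt/memobj.h), which lands in rtR0MemObjNativeGetPagePhysAddr below.
 */
#if 0 /* illustrative only */
static int rtR0MemObjSolAllocPhysNCExample(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPhysNC(&hMemObj, _2M, NIL_RTHCPHYS);
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS PhysFirst = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        Assert(PhysFirst != NIL_RTHCPHYS);
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif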


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    AssertMsgReturn(PhysHighest >= 16 * _1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Allocating one large page gets special treatment.
     */
    static uint32_t s_cbLargePage = UINT32_MAX;
    if (s_cbLargePage == UINT32_MAX)
    {
        if (page_num_pagesizes() > 1)
            ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
        else
            ASMAtomicWriteU32(&s_cbLargePage, 0);
    }

    uint64_t PhysAddr;
    if (   cb == s_cbLargePage
        && cb == uAlignment
        && PhysHighest == NIL_RTHCPHYS)
    {
        /*
         * Allocate one large page (backed by physically contiguous memory).
         */
        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
        if (RT_LIKELY(pvPages))
        {
            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
            pMemSolaris->Core.pv                = NULL;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = pvPages;
            pMemSolaris->fLargePage             = true;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate physically contiguous memory aligned as specified.
         */
        AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
        PhysAddr = PhysHighest;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
        if (RT_LIKELY(pvMem))
        {
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
            Assert(PhysAddr < PhysHighest);
            Assert(PhysAddr + cb <= PhysHighest);

            pMemSolaris->Core.pv                = pvMem;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = NULL;
            pMemSolaris->fLargePage             = false;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    rtR0MemObjDelete(&pMemSolaris->Core);
    return VERR_NO_CONT_MEMORY;
}
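
/*
 * Editor's note (illustrative, not upstream code): the large-page path above
 * is only taken for a request of exactly one naturally aligned large page with
 * no upper physical address limit; everything else falls back to contiguous
 * allocation via rtR0SolMemAlloc. The predicate below restates that condition;
 * its name is hypothetical.
 */
#if 0 /* illustrative only */
static bool rtR0MemObjSolIsLargePageRequest(size_t cb, size_t uAlignment, RTHCPHYS PhysHighest, uint32_t cbLargePage)
{
    return cb == cbLargePage
        && cb == uAlignment
        && PhysHighest == NIL_RTHCPHYS;
}
#endif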


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* There is no allocation here; the memory needs to be mapped somewhere first. */
    pMemSolaris->Core.u.Phys.fAllocated   = false;
    pMemSolaris->Core.u.Phys.PhysBase     = Phys;
    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);

    /* Create the locking object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down user pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = R0Process;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down kernel pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    PRTR0MEMOBJSOL pMemSolaris;

    /*
     * Use xalloc.
     */
    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_NO_MEMORY;

    /* Create the object. */
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
    if (!pMemSolaris)
    {
        LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
        vmem_xfree(heap_arena, pv, cb);
        return VERR_NO_MEMORY;
    }

    pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process)
{
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    /* Fail if requested to do something we can't. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Use xalloc to get address space.
     */
    if (!cbSub)
        cbSub = pMemToMap->cb;
    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_MAP_FAILED;

    /*
     * Load the pages from the other object into it.
     */
    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
    if (fProt & RTMEM_PROT_READ)
        fAttr |= PROT_READ;
    if (fProt & RTMEM_PROT_EXEC)
        fAttr |= PROT_EXEC;
    if (fProt & RTMEM_PROT_WRITE)
        fAttr |= PROT_WRITE;
    fAttr |= HAT_NOSYNC;

    int rc = VINF_SUCCESS;
    size_t off = 0;
    while (off < cbSub)
    {
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
        pfn_t pfn = HCPhys >> PAGE_SHIFT;
        AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);

        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);

        /* Advance. */
        off += PAGE_SIZE;
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Create a memory object for the mapping.
         */
        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cbSub);
        if (pMemSolaris)
        {
            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }

        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
        rc = VERR_NO_MEMORY;
    }

    if (off)
        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
    vmem_xfree(heap_arena, pv, cbSub);
    return rc;
}
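
/*
 * Editor's note (illustrative, not upstream code): the mapping loop above
 * walks the sub-range one 4K page at a time, and the page index passed to
 * RTR0MemObjGetPagePhysAddr is relative to the start of the source object,
 * hence (offSub + off) >> PAGE_SHIFT. The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static size_t rtR0MemObjSolPageIndex(size_t offSub, size_t off)
{
    /* Page index within the source object of the byte at offSub + off. */
    return (offSub + off) >> PAGE_SHIFT;
}
#endif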


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Fend off things we cannot do.
     */
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Get parameters from the source object.
     */
    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
    void          *pv               = pMemToMapSolaris->Core.pv;
    size_t         cb               = pMemToMapSolaris->Core.cb;
    size_t         cPages           = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;

    /*
     * Create the mapping object.
     */
    PRTR0MEMOBJSOL pMemSolaris;
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    int rc = VINF_SUCCESS;
    uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
    if (RT_LIKELY(paPhysAddrs))
    {
        /*
         * Prepare the pages for mapping according to type.
         */
        if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
            && pMemToMapSolaris->fIndivPages)
        {
            page_t **ppPages = pMemToMapSolaris->pvHandle;
            AssertPtr(ppPages);
            for (size_t iPage = 0; iPage < cPages; iPage++)
                paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(ppPages[iPage]);
        }
        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
                 && pMemToMapSolaris->fLargePage)
        {
            RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
            for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
                paPhysAddrs[iPage] = Phys;
        }
        else
        {
            /*
             * Have kernel mapping, just translate virtual to physical.
             */
            AssertPtr(pv);
            rc = VINF_SUCCESS;
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pv);
                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
                {
                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
                    rc = VERR_MAP_FAILED;
                    break;
                }
                pv = (void *)((uintptr_t)pv + PAGE_SIZE);
            }
        }
        if (RT_SUCCESS(rc))
        {
            unsigned fPageAccess = PROT_READ;
            if (fProt & RTMEM_PROT_WRITE)
                fPageAccess |= PROT_WRITE;
            if (fProt & RTMEM_PROT_EXEC)
                fPageAccess |= PROT_EXEC;

            /*
             * Perform the actual mapping.
             */
            caddr_t UserAddr = NULL;
            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
            if (RT_SUCCESS(rc))
            {
                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
                pMemSolaris->Core.pv                  = UserAddr;

                *ppMem = &pMemSolaris->Core;
                kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
                return VINF_SUCCESS;
            }

            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
        }

        rc = VERR_MAP_FAILED;
        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemSolaris->Core);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
            if (   pMemSolaris->Core.u.Phys.fAllocated
                || !pMemSolaris->fIndivPages)
            {
                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
                return rtR0MemObjSolVirtToPhys(pb);
            }
            page_t **ppPages = pMemSolaris->pvHandle;
            return rtR0MemObjSolPagePhys(ppPages[iPage]);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        {
            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return rtR0MemObjSolVirtToPhys(pb);
        }

        /*
         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
         */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
            AssertFailed(); /* handled by the caller */
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}
