VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c@42840

Last change on this file since 42840 was 41968, checked in by vboxsync, 13 years ago:
Runtime/r0drv/solaris: comments, spaces.

/* $Id: memobj-r0drv-solaris.c 41968 2012-06-29 03:36:30Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Solaris.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/memobj.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"
#include "memobj-r0drv-solaris.h"

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The Solaris version of the memory object structure.
 */
typedef struct RTR0MEMOBJSOL
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to kernel memory cookie. */
    ddi_umem_cookie_t   Cookie;
    /** Shadow locked pages. */
    void               *pvHandle;
    /** Access during locking. */
    int                 fAccess;
    /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS
     *  allocation. */
    bool                fLargePage;
    /** Whether we have individual pages or a kernel-mapped virtual memory
     *  block in an RTR0MEMOBJTYPE_PHYS_NC allocation. */
    bool                fIndivPages;
} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
static vnode_t          g_PageVnode;
static kmutex_t         g_OffsetMtx;
static u_offset_t       g_offPage;

static vnode_t          g_LargePageVnode;
static kmutex_t         g_LargePageOffsetMtx;
static u_offset_t       g_offLargePage;
static bool             g_fLargePageNoReloc;


/**
 * Returns the physical address for a virtual address.
 *
 * @param pv        The virtual address.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    struct hat *pHat         = NULL;
    pfn_t       PageFrameNum = 0;
    uintptr_t   uVirtAddr    = (uintptr_t)pv;

    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
    return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
}
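
/*
 * Worked example (illustrative note, not part of the original file): with _4K
 * pages, PAGE_SHIFT is 12 and PAGE_OFFSET_MASK is 0xfff. If hat_getpfnum()
 * returns PFN 0x12345 for a virtual address whose in-page offset is 0xabc,
 * the function above composes the physical address as:
 *
 *      ((uint64_t)0x12345 << 12) | 0xabc  ->  0x12345abc
 *
 * i.e. the page frame number supplies the high bits and the byte offset
 * within the page is carried over from the virtual address unchanged.
 */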


/**
 * Returns the physical address for a page.
 *
 * @param pPage     Pointer to the page.
 *
 * @returns The physical address for a page.
 */
static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
{
    AssertPtr(pPage);
    pfn_t PageFrameNum = page_pptonum(pPage);
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
    return (uint64_t)PageFrameNum << PAGE_SHIFT;
}


/**
 * Allocates one page.
 *
 * @param virtAddr  The virtual address to which this page may be mapped in
 *                  the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t offPage;
    seg_t      KernelSeg;

    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage   = g_offPage;
    mutex_exit(&g_OffsetMtx);

    KernelSeg.s_as = &kas;
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent it from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared
         * lock to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}
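
/*
 * Illustrative usage sketch (example only, not part of the original file; the
 * function name is made up). A page returned by rtR0MemObjSolPageAlloc() above
 * is shared-locked and "long term" locked, and rtR0MemObjSolPageDestroy() below
 * expects to receive it in exactly that state. Note that the real callers in
 * this file also reserve availrmem via page_resv() before allocating.
 */
#if 0 /* example only */
static int rtR0MemObjSolExamplePageRoundTrip(void)
{
    if (!page_resv(1, KM_NOSLEEP))              /* reserve one page of availrmem */
        return VERR_NO_MEMORY;

    page_t *pPage = rtR0MemObjSolPageAlloc(0 /* virtAddr, colouring hint only */);
    if (!pPage)
    {
        page_unresv(1);
        return VERR_NO_MEMORY;
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));   /* invariant established by the allocator */

    /* ... use rtR0MemObjSolPagePhys(pPage) to obtain the physical address ... */

    rtR0MemObjSolPageDestroy(pPage);            /* expects the shared lock to still be held */
    page_unresv(1);
    return VINF_SUCCESS;
}
#endif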


/**
 * Destroys an allocated page.
 *
 * @param pPage     Pointer to the page to be destroyed.
 * @remarks This function expects the page in @c pPage to be shared locked.
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need to exclusively lock the page before freeing it. If upgrading the shared
     * lock to exclusive fails, drop the page lock and look the page up from the hash.
     * Record the page offset before we drop the page lock as we cannot touch any
     * page_t members once the lock is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the pages as PG_NORELOC, we should always get back the exact same page.
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}


/**
 * Allocates physical, non-contiguous memory of pages.
 *
 * @param puPhys    Where to store the physical address of the first page.
 *                  Optional, can be NULL.
 * @param cb        The size of the allocation.
 *
 * @returns Array of allocated pages, NULL on failure.
 */
static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
{
    /*
     * VM1:
     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     * The cachelist is not really free pages, but when memory is exhausted they'll be moved to the
     * free lists. It's the total of the free + cache lists that we see in the 'free' column of vmstat.
     *
     * VM2:
     * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
     */

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        size_t   cbPages = cPages * sizeof(page_t *);
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            /*
             * Get pages from kseg. The 'virtAddr' here is only for colouring, but unfortunately
             * we don't yet have the 'virtAddr' to which this memory may be mapped.
             */
            caddr_t virtAddr = 0;
            for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
            {
                /*
                 * Get a page from the free list locked exclusively. The page will be named (hashed in)
                 * and we rely on it during free. The page we get will be shared locked to prevent the page
                 * from being relocated.
                 */
                page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
                if (RT_UNLIKELY(!pPage))
                {
                    /*
                     * No page found, release whatever pages we grabbed so far.
                     */
                    for (size_t k = 0; k < i; k++)
                        rtR0MemObjSolPageDestroy(ppPages[k]);
                    kmem_free(ppPages, cbPages);
                    page_unresv(cPages);
                    return NULL;
                }

                ppPages[i] = pPage;
            }

            if (puPhys)
                *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
            return ppPages;
        }

        page_unresv(cPages);
    }

    return NULL;
}
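
/*
 * Illustrative pairing (example only, not part of the original file; the
 * function name is made up): the array returned by rtR0MemObjSolPagesAlloc()
 * must be handed back to rtR0MemObjSolPagesFree() below with the same byte
 * count, since both derive the page count and the array size from 'cb' in the
 * same way.
 */
#if 0 /* example only */
static void rtR0MemObjSolExamplePhysNc(size_t cb)
{
    uint64_t uPhys   = UINT64_MAX;
    page_t **ppPages = rtR0MemObjSolPagesAlloc(&uPhys, cb);
    if (ppPages)
    {
        /* ppPages[0..cPages-1] are shared-locked PG_NORELOC pages and uPhys
           holds the physical address of the first one. */
        rtR0MemObjSolPagesFree(ppPages, cb);    /* same cb as the allocation */
    }
}
#endif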


/**
 * Frees the allocated pages.
 *
 * @param ppPages   Pointer to the page list.
 * @param cb        Size of the allocation.
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    size_t cPages  = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
        rtR0MemObjSolPageDestroy(ppPages[iPage]);

    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Allocates one large page.
 *
 * @param puPhys        Where to store the physical address of the allocated
 *                      page. Optional, can be NULL.
 * @param cbLargePage   Size of the large page.
 *
 * @returns Pointer to a list of pages that cover the large page, NULL on
 *          failure.
 */
static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
{
    /*
     * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
     * fragmentation on systems that support it.
     */
    static bool fPageNoRelocChecked = false;
    if (fPageNoRelocChecked == false)
    {
        fPageNoRelocChecked = true;
        g_fLargePageNoReloc = false;
        if (   g_pfnrtR0Sol_page_noreloc_supported
            && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
        {
            g_fLargePageNoReloc = true;
        }
    }

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t     cPages  = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t     cbPages = cPages * sizeof(page_t *);
    u_offset_t offPage = 0;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            mutex_enter(&g_LargePageOffsetMtx);
            AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
            g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
            offPage        = g_offLargePage;
            mutex_exit(&g_LargePageOffsetMtx);

            seg_t KernelSeg;
            KernelSeg.s_as = &kas;
            page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
                                                     PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
                                                     0 /* vaddr */, NULL /* locality group */);
            if (pRootPage)
            {
                /*
                 * Split it into sub-pages, downgrading each page to a shared lock to prevent page relocation.
                 */
                page_t *pPageList = pRootPage;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    page_t *pPage = pPageList;
                    AssertPtr(pPage);
                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
                                                                 (int)pPage->p_szc, (int)pRootPage->p_szc));

                    /*
                     * Lock the page into memory "long term". This prevents callers of page_try_demote_pages()
                     * (such as the pageout scanner) from demoting the large page into smaller pages while we
                     * temporarily release the exclusive lock (during free). We pass "0, 1" since we've already
                     * accounted for availrmem during page_resv().
                     */
                    page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);

                    page_sub(&pPageList, pPage);
                    page_io_unlock(pPage);
                    page_downgrade(pPage);
                    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

                    ppPages[iPage] = pPage;
                }
                Assert(pPageList == NULL);
                Assert(ppPages[0] == pRootPage);

                uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
                AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
                if (puPhys)
                    *puPhys = uPhys;
                return ppPages;
            }

            /*
             * Don't roll back the offset in case of failure (race condition), we have plenty of offset space.
             * The offset must be unique (for the same vnode) or we'll encounter panics in page_create_va_large().
             */
            kmem_free(ppPages, cbPages);
        }

        page_unresv(cPages);
    }
    return NULL;
}
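
/*
 * Worked example (illustrative note, not part of the original file): for _2M
 * large pages, the offset counter above hands out 0x200000, 0x400000,
 * 0x600000, ... since each caller aligns g_offLargePage up to cbLargePage and
 * then steps past it while holding g_LargePageOffsetMtx. Every allocation
 * therefore gets a unique, large-page-aligned (vnode, offset) pair, which is
 * what page_create_va_large() requires.
 */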


/**
 * Frees the large page.
 *
 * @param ppPages       Pointer to the list of small pages that cover the
 *                      large page.
 * @param cbLargePage   Size of the allocation (i.e. size of the large
 *                      page).
 */
static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
{
    Assert(ppPages);
    Assert(cbLargePage > PAGE_SIZE);

    bool    fDemoted  = false;
    size_t  cPages    = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t  cbPages   = cPages * sizeof(page_t *);
    page_t *pPageList = ppPages[0];

    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * We need the pages exclusively locked, so try upgrading the shared lock.
         * If that fails, drop the shared page lock (we cannot access any page_t members
         * once this is done) and look the page up from the page hash, locking it exclusively.
         */
        page_t    *pPage   = ppPages[iPage];
        u_offset_t offPage = pPage->p_offset;
        int rc = page_tryupgrade(pPage);
        if (!rc)
        {
            page_unlock(pPage);
            page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
            AssertRelease(pFoundPage);

            if (g_fLargePageNoReloc)
            {
                /*
                 * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
                 */
                AssertReleaseMsg(pFoundPage == pPage,
                                 ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
                                  pFoundPage, pPage));
            }

            /*
             * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
             * could possibly demote the large page to _4K pages between our calls to page_unlock() and page_lookup().
             */
            if (page_get_pagecnt(pFoundPage->p_szc) == 1)   /* Base size of only _4K associated with this page. */
                fDemoted = true;
            pPage          = pFoundPage;
            ppPages[iPage] = pFoundPage;
        }
        Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
        page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    }

    if (fDemoted)
    {
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
            page_destroy(ppPages[iPage], 0 /* move it to the free list */);
        }
    }
    else
    {
        /*
         * Although we shredded the linked list (via page_sub during allocation), page_destroy_pages()
         * walks the physically adjacent pages via array increments. So this does indeed free all the pages.
         */
        AssertPtr(pPageList);
        page_destroy_pages(pPageList);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Unmaps kernel/user-space mapped memory.
 *
 * @param pv        Pointer to the mapped memory block.
 * @param cb        Size of the memory block.
 */
static void rtR0MemObjSolUnmap(void *pv, size_t cb)
{
    if (SOL_IS_KRNL_ADDR(pv))
    {
        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, pv, cb);
    }
    else
    {
        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
        AssertPtr(pAddrSpace);
        as_rangelock(pAddrSpace);
        as_unmap(pAddrSpace, pv, cb);
        as_rangeunlock(pAddrSpace);
    }
}


/**
 * Locks down memory mappings for a virtual address.
 *
 * @param pv            Pointer to the memory to lock down.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
{
    /*
     * Kernel memory mappings on x86/amd64 are always locked, so only handle user-space memory.
     */
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProc);
        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
        if (rc)
        {
            LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
            return VERR_LOCK_FAILED;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Unlocks memory mappings for a virtual address.
 *
 * @param pv            Pointer to the locked memory.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 */
static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
{
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProcess);
        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
    }
}
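
/*
 * Illustrative pairing (example only, not part of the original file; the
 * function name is made up): a range locked with rtR0MemObjSolLock() must
 * later be released by passing the same address, size and access type to
 * rtR0MemObjSolUnlock(), mirroring the F_SOFTLOCK/F_SOFTUNLOCK pairing that
 * as_fault() expects.
 */
#if 0 /* example only */
static int rtR0MemObjSolExampleLockRange(void *pvUser, size_t cb)
{
    int rc = rtR0MemObjSolLock(pvUser, cb, S_WRITE);
    if (RT_SUCCESS(rc))
    {
        /* ... the user pages backing [pvUser, pvUser + cb) are now resident and pinned ... */
        rtR0MemObjSolUnlock(pvUser, cb, S_WRITE);
    }
    return rc;
}
#endif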


/**
 * Maps a list of physical pages into user address space.
 *
 * @param pVirtAddr     Where to store the virtual address of the mapping.
 * @param fPageAccess   Page access rights (PROT_READ, PROT_WRITE,
 *                      PROT_EXEC).
 * @param paPhysAddrs   Array of physical addresses to pages.
 * @param cb            Size of memory being mapped.
 * @param cbPageSize    Size of the pages being mapped.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
{
    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
    int rc = VERR_INTERNAL_ERROR;
    SEGVBOX_CRARGS Args;

    Args.paPhysAddrs = paPhysAddrs;
    Args.fPageAccess = fPageAccess;
    Args.cbPageSize  = cbPageSize;

    as_rangelock(pAddrSpace);
    map_addr(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
    if (*pVirtAddr != NULL)
        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
    else
        rc = ENOMEM;
    as_rangeunlock(pAddrSpace);

    return RTErrConvertFromErrno(rc);
}
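
/*
 * Illustrative call sketch (example only, not part of the original file; the
 * function name and physical addresses are hypothetical): mapping two _4K
 * pages read-only into the calling process. map_addr() picks a suitable user
 * address, so pvUser is an output here.
 */
#if 0 /* example only */
static int rtR0MemObjSolExampleUserMapTwoPages(uint64_t uPhysPage0, uint64_t uPhysPage1)
{
    uint64_t aPhysAddrs[2] = { uPhysPage0, uPhysPage1 };
    caddr_t  pvUser        = NULL;
    int rc = rtR0MemObjSolUserMap(&pvUser, PROT_READ, aPhysAddrs, 2 * PAGE_SIZE, PAGE_SIZE);
    if (RT_SUCCESS(rc))
    {
        /* ... pvUser now addresses the 8K user mapping; undo it later with
           rtR0MemObjSolUnmap(pvUser, 2 * PAGE_SIZE) ... */
    }
    return rc;
}
#endif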


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PHYS:
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                if (pMemSolaris->fLargePage)
                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
                else
                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            if (pMemSolaris->fIndivPages)
                rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
            else
                rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            ddi_umem_free(pMemSolaris->Cookie);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            else
                AssertFailed();
            break;
        }

        case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
        default:
            AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_PAGE_MEMORY;
    }

    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Allocate physically low page-aligned memory. */
    uint64_t uPhysHi = _4G - 1;
    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_LOW_MEMORY;
    }
    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);
    return rtR0MemObjNativeAllocPhys(ppMem, cb, _4G - 1, PAGE_SIZE /* alignment */);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if HC_ARCH_BITS == 64
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    if (PhysHighest == NIL_RTHCPHYS)
    {
        uint64_t PhysAddr = UINT64_MAX;
        void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
        if (!pvPages)
        {
            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
            rtR0MemObjDelete(&pMemSolaris->Core);
            return VERR_NO_MEMORY;
        }
        Assert(PhysAddr != UINT64_MAX);
        Assert(!(PhysAddr & PAGE_OFFSET_MASK));

        pMemSolaris->Core.pv     = NULL;
        pMemSolaris->pvHandle    = pvPages;
        pMemSolaris->fIndivPages = true;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }
    else
    {
        /*
         * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
         * We fall back to using contig_alloc().
         */
        uint64_t PhysAddr = UINT64_MAX;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
        if (!pvMem)
        {
            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
            rtR0MemObjDelete(&pMemSolaris->Core);
            return VERR_NO_MEMORY;
        }
        Assert(PhysAddr != UINT64_MAX);
        Assert(!(PhysAddr & PAGE_OFFSET_MASK));

        pMemSolaris->Core.pv     = pvMem;
        pMemSolaris->pvHandle    = NULL;
        pMemSolaris->fIndivPages = false;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }

#else /* 32 bit: */
    return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
#endif
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    AssertMsgReturn(PhysHighest >= 16 * _1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Allocating one large page gets special treatment.
     */
    static uint32_t s_cbLargePage = UINT32_MAX;
    if (s_cbLargePage == UINT32_MAX)
    {
        if (page_num_pagesizes() > 1)
            ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
        else
            ASMAtomicWriteU32(&s_cbLargePage, 0);
    }

    uint64_t PhysAddr;
    if (   cb == s_cbLargePage
        && cb == uAlignment
        && PhysHighest == NIL_RTHCPHYS)
    {
        /*
         * Allocate one large page (backed by physically contiguous memory).
         */
        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
        if (RT_LIKELY(pvPages))
        {
            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
            pMemSolaris->Core.pv                = NULL;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = pvPages;
            pMemSolaris->fLargePage             = true;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate physically contiguous memory aligned as specified.
         */
        AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
        PhysAddr = PhysHighest;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
        if (RT_LIKELY(pvMem))
        {
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
            Assert(PhysAddr < PhysHighest);
            Assert(PhysAddr + cb <= PhysHighest);

            pMemSolaris->Core.pv                = pvMem;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = NULL;
            pMemSolaris->fLargePage             = false;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    rtR0MemObjDelete(&pMemSolaris->Core);
    return VERR_NO_CONT_MEMORY;
}
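
/*
 * Illustrative caller sketch (example only, not part of the original file;
 * the function name is made up and the public ring-0 APIs from iprt/memobj.h
 * are assumed): the large page path above is taken when the allocation size
 * and alignment both equal the system's large page size and no physical
 * address ceiling is given.
 */
#if 0 /* example only */
static int rtR0MemObjSolExampleAlloc2MPage(void)
{
    RTR0MEMOBJ hMemObj;
    /* _2M size + _2M alignment + NIL_RTHCPHYS selects rtR0MemObjSolLargePageAlloc(). */
    int rc = RTR0MemObjAllocPhysEx(&hMemObj, _2M, NIL_RTHCPHYS, _2M);
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        NOREF(Phys); /* ... use the physically contiguous 2M region ... */
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif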


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* There is no allocation here, it needs to be mapped somewhere first. */
    pMemSolaris->Core.u.Phys.fAllocated   = false;
    pMemSolaris->Core.u.Phys.PhysBase     = Phys;
    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);

    /* Create the locking object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down user pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = R0Process;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down kernel pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    PRTR0MEMOBJSOL pMemSolaris;

    /*
     * Use xalloc.
     */
    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_NO_MEMORY;

    /* Create the object. */
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
    if (!pMemSolaris)
    {
        LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
        vmem_xfree(heap_arena, pv, cb);
        return VERR_NO_MEMORY;
    }

    pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process)
{
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    /* Fail if requested to do something we can't. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Use xalloc to get address space.
     */
    if (!cbSub)
        cbSub = pMemToMap->cb;
    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_MAP_FAILED;

    /*
     * Load the pages from the other object into it.
     */
    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
    if (fProt & RTMEM_PROT_READ)
        fAttr |= PROT_READ;
    if (fProt & RTMEM_PROT_EXEC)
        fAttr |= PROT_EXEC;
    if (fProt & RTMEM_PROT_WRITE)
        fAttr |= PROT_WRITE;
    fAttr |= HAT_NOSYNC;

    int    rc  = VINF_SUCCESS;
    size_t off = 0;
    while (off < cbSub)
    {
        RTHCPHYS HCPhys = rtR0MemObjNativeGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
        pfn_t pfn = HCPhys >> PAGE_SHIFT;
        AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);

        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);

        /* Advance. */
        off += PAGE_SIZE;
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Create a memory object for the mapping.
         */
        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cbSub);
        if (pMemSolaris)
        {
            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }

        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
        rc = VERR_NO_MEMORY;
    }

    if (off)
        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
    vmem_xfree(heap_arena, pv, cbSub);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Fend off things we cannot do.
     */
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Get parameters from the source object.
     */
    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
    void          *pv               = pMemToMapSolaris->Core.pv;
    size_t         cb               = pMemToMapSolaris->Core.cb;
    size_t         cPages           = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;

    /*
     * Create the mapping object.
     */
    PRTR0MEMOBJSOL pMemSolaris;
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    int rc = VINF_SUCCESS;
    uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
    if (RT_LIKELY(paPhysAddrs))
    {
        /*
         * Prepare the pages for mapping according to type.
         */
        if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
            && pMemToMapSolaris->fIndivPages)
        {
            page_t **ppPages = pMemToMapSolaris->pvHandle;
            AssertPtr(ppPages);
            for (size_t iPage = 0; iPage < cPages; iPage++)
                paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(ppPages[iPage]);
        }
        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
                 && pMemToMapSolaris->fLargePage)
        {
            RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
            for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
                paPhysAddrs[iPage] = Phys;
        }
        else
        {
            /*
             * Have kernel mapping, just translate virtual to physical.
             */
            AssertPtr(pv);
            rc = VINF_SUCCESS;
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pv);
                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
                {
                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
                    rc = VERR_MAP_FAILED;
                    break;
                }
                pv = (void *)((uintptr_t)pv + PAGE_SIZE);
            }
        }
        if (RT_SUCCESS(rc))
        {
            unsigned fPageAccess = PROT_READ;
            if (fProt & RTMEM_PROT_WRITE)
                fPageAccess |= PROT_WRITE;
            if (fProt & RTMEM_PROT_EXEC)
                fPageAccess |= PROT_EXEC;

            /*
             * Perform the actual mapping.
             */
            caddr_t UserAddr = NULL;
            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
            if (RT_SUCCESS(rc))
            {
                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
                pMemSolaris->Core.pv                  = UserAddr;

                *ppMem = &pMemSolaris->Core;
                kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
                return VINF_SUCCESS;
            }

            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
        }

        rc = VERR_MAP_FAILED;
        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemSolaris->Core);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            if (   pMemSolaris->Core.u.Phys.fAllocated
                || !pMemSolaris->fIndivPages)
            {
                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
                return rtR0MemObjSolVirtToPhys(pb);
            }
            page_t **ppPages = pMemSolaris->pvHandle;
            return rtR0MemObjSolPagePhys(ppPages[iPage]);
        }

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        {
            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return rtR0MemObjSolVirtToPhys(pb);
        }

        /*
         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
         */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
            AssertFailed(); /* handled by the caller */
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}