VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c @ 50266

Last change on this file since 50266 was 44677, checked in by vboxsync, 12 years ago

memobj-r0drv-solaris.c: Fixed mapping of RTR0MEMOBJTYPE_PHYS objects into kernel space.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 39.5 KB
/* $Id: memobj-r0drv-solaris.c 44677 2013-02-13 22:50:00Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Solaris.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/memobj.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"
#include "memobj-r0drv-solaris.h"

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
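/** Checks if an address lies within the kernel's address space, i.e. at or
 *  above kernelbase. */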
#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The Solaris version of the memory object structure.
 */
typedef struct RTR0MEMOBJSOL
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to kernel memory cookie. */
    ddi_umem_cookie_t   Cookie;
    /** Shadow locked pages. */
    void               *pvHandle;
    /** Access during locking. */
    int                 fAccess;
    /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS
     *  allocation. */
    bool                fLargePage;
    /** Whether we have individual pages or a kernel-mapped virtual memory block in
     *  an RTR0MEMOBJTYPE_PHYS_NC allocation. */
    bool                fIndivPages;
} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;
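/* Note: for RTR0MEMOBJTYPE_PHYS_NC allocations with fIndivPages set, and for
 * large-page RTR0MEMOBJTYPE_PHYS allocations, pvHandle holds the page_t **
 * array that rtR0MemObjNativeFree() later hands back to the free routines. */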


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
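/* Pages are "named" by hashing them into one of the vnodes below at a unique,
 * monotonically increasing 64-bit offset. The offset counters are protected by
 * the corresponding mutexes, so a page can be re-found with page_lookup()
 * after its page lock has been dropped temporarily. */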
static vnode_t          g_PageVnode;
static kmutex_t         g_OffsetMtx;
static u_offset_t       g_offPage;

static vnode_t          g_LargePageVnode;
static kmutex_t         g_LargePageOffsetMtx;
static u_offset_t       g_offLargePage;
static bool             g_fLargePageNoReloc;


/**
 * Returns the physical address for a virtual address.
 *
 * @param pv        The virtual address.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    struct hat *pHat         = NULL;
    pfn_t       PageFrameNum = 0;
    uintptr_t   uVirtAddr    = (uintptr_t)pv;

    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
    return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
}


/**
 * Returns the physical address for a page.
 *
 * @param pPage     Pointer to the page.
 *
 * @returns The physical address for a page.
 */
static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
{
    AssertPtr(pPage);
    pfn_t PageFrameNum = page_pptonum(pPage);
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
    return (uint64_t)PageFrameNum << PAGE_SHIFT;
}


/**
 * Allocates one page.
 *
 * @param virtAddr  The virtual address to which this page may be mapped in
 *                  the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t offPage;
    seg_t      KernelSeg;

    /*
     * 16777215 terabytes of total memory for all VMs or
     * restart 8000 1GB VMs 2147483 times until wraparound!
     * In other words, the 64-bit offset space cannot realistically wrap
     * around, so we simply hand out the next PAGE_SIZE-aligned offset.
     */
    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage   = g_offPage;
    mutex_exit(&g_OffsetMtx);

    KernelSeg.s_as = &kas;
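    /* Create a fresh page named (hashed in) against g_PageVnode at our unique
     * offset; PG_WAIT may block until memory is available, PG_NORELOC keeps
     * the page from being relocated by the VM subsystem. */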
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent this page from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared lock
         * to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}


/**
 * Destroys an allocated page.
 *
 * @param pPage     Pointer to the page to be destroyed.
 * @remarks This function expects the page in @a pPage to be shared locked.
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need to exclusively lock the pages before freeing them; if upgrading the shared
     * lock to exclusive fails, drop the page lock and look the page up from the hash.
     * Record the page offset before we drop the page lock as we cannot touch any
     * page_t members once the lock is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the pages as PG_NORELOC we should only get back the exact page always.
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}


/**
 * Allocates physical, non-contiguous memory of pages.
 *
 * @param puPhys    Where to store the physical address of the first page.
 *                  Optional, can be NULL.
 * @param cb        The size of the allocation.
 *
 * @returns Array of allocated pages, NULL on failure.
 */
static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
{
    /*
     * VM1:
     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     * The cachelist is not really free pages but when memory is exhausted they'll be moved to the
     * free lists; it's the total of the free+cache lists that we see in the 'free' column of vmstat.
     *
     * VM2:
     * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
     */

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        size_t   cbPages = cPages * sizeof(page_t *);
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            /*
             * Get pages from kseg; the 'virtAddr' here is only for colouring, but unfortunately
             * we don't yet have the 'virtAddr' to which this memory may be mapped.
             */
            caddr_t virtAddr = 0;
            for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
            {
                /*
                 * Get a page from the free list locked exclusively. The page will be named (hashed in)
                 * and we rely on it during free. The page we get will be shared locked to prevent the page
                 * from being relocated.
                 */
                page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
                if (RT_UNLIKELY(!pPage))
                {
                    /*
                     * No page found, release whatever pages we grabbed so far.
                     */
                    for (size_t k = 0; k < i; k++)
                        rtR0MemObjSolPageDestroy(ppPages[k]);
                    kmem_free(ppPages, cbPages);
                    page_unresv(cPages);
                    return NULL;
                }

                ppPages[i] = pPage;
            }

            if (puPhys)
                *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
            return ppPages;
        }

        page_unresv(cPages);
    }

    return NULL;
}


/**
 * Frees the allocated pages.
 *
 * @param ppPages   Pointer to the page list.
 * @param cb        Size of the allocation.
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    size_t cPages  = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
        rtR0MemObjSolPageDestroy(ppPages[iPage]);

    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Allocates one large page.
 *
 * @param puPhys        Where to store the physical address of the allocated
 *                      page. Optional, can be NULL.
 * @param cbLargePage   Size of the large page.
 *
 * @returns Pointer to a list of pages that cover the large page, NULL on
 *          failure.
 */
static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
{
    /*
     * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
     * fragmentation on systems that support it.
     */
    static bool fPageNoRelocChecked = false;
    if (fPageNoRelocChecked == false)
    {
        fPageNoRelocChecked = true;
        g_fLargePageNoReloc = false;
        if (   g_pfnrtR0Sol_page_noreloc_supported
            && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
        {
            g_fLargePageNoReloc = true;
        }
    }

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t     cPages  = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t     cbPages = cPages * sizeof(page_t *);
    u_offset_t offPage = 0;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            mutex_enter(&g_LargePageOffsetMtx);
            AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
            g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
            offPage        = g_offLargePage;
            mutex_exit(&g_LargePageOffsetMtx);

            seg_t KernelSeg;
            KernelSeg.s_as = &kas;
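            /* Create the whole large page in one go as an exclusively locked
             * page list named against g_LargePageVnode at the offset we just
             * reserved; the constituent 4K pages are then split off, IO
             * unlocked and downgraded to shared locks below. */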
            page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
                                                     PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
                                                     0 /* vaddr */, NULL /* locality group */);
            if (pRootPage)
            {
                /*
                 * Split it into sub-pages, downgrade each page to a shared lock to prevent page relocation.
                 */
                page_t *pPageList = pRootPage;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    page_t *pPage = pPageList;
                    AssertPtr(pPage);
                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
                                                                 (int)pPage->p_szc, (int)pRootPage->p_szc));

                    /*
                     * Lock the page into memory "long term". This prevents callers of page_try_demote_pages() (such as the
                     * pageout scanner) from demoting the large page into smaller pages while we temporarily release the
                     * exclusive lock (during free). We pass "0, 1" since we've already accounted for availrmem during
                     * page_resv().
                     */
                    page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);

                    page_sub(&pPageList, pPage);
                    page_io_unlock(pPage);
                    page_downgrade(pPage);
                    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

                    ppPages[iPage] = pPage;
                }
                Assert(pPageList == NULL);
                Assert(ppPages[0] == pRootPage);

                uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
                AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
                if (puPhys)
                    *puPhys = uPhys;
                return ppPages;
            }

            /*
             * Don't restore offPrev in case of failure (race condition), we have plenty of offset space.
             * The offset must be unique (for the same vnode) or we'll encounter panics on page_create_va_large().
             */
            kmem_free(ppPages, cbPages);
        }

        page_unresv(cPages);
    }
    return NULL;
}


/**
 * Frees the large page.
 *
 * @param ppPages       Pointer to the list of small pages that cover the
 *                      large page.
 * @param cbLargePage   Size of the allocation (i.e. size of the large
 *                      page).
 */
static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
{
    Assert(ppPages);
    Assert(cbLargePage > PAGE_SIZE);

    bool    fDemoted  = false;
    size_t  cPages    = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t  cbPages   = cPages * sizeof(page_t *);
    page_t *pPageList = ppPages[0];

    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * We need the pages exclusively locked, try upgrading the shared lock.
         * If it fails, drop the shared page lock (cannot access any page_t members once this is done)
         * and lookup the page from the page hash locking it exclusively.
         */
        page_t    *pPage   = ppPages[iPage];
        u_offset_t offPage = pPage->p_offset;
        int rc = page_tryupgrade(pPage);
        if (!rc)
        {
            page_unlock(pPage);
            page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
            AssertRelease(pFoundPage);

            if (g_fLargePageNoReloc)
            {
                /*
                 * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
                 */
                AssertReleaseMsg(pFoundPage == pPage,
                                 ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
                                  pFoundPage, pPage));
            }

            /*
             * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
             * could possibly demote the large page to _4K pages between our call to page_unlock() and page_lookup().
             */
            if (page_get_pagecnt(pFoundPage->p_szc) == 1)   /* Base size of only _4K associated with this page. */
                fDemoted = true;
            pPage = pFoundPage;
            ppPages[iPage] = pFoundPage;
        }
        Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
        page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    }

    if (fDemoted)
    {
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
            page_destroy(ppPages[iPage], 0 /* move it to the free list */);
        }
    }
    else
    {
        /*
         * Although we shred the adjacent pages in the linked list, page_destroy_pages works on
         * adjacent pages via array increments. So this does indeed free all the pages.
         */
        AssertPtr(pPageList);
        page_destroy_pages(pPageList);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Unmaps kernel/user-space mapped memory.
 *
 * @param pv    Pointer to the mapped memory block.
 * @param cb    Size of the memory block.
 */
static void rtR0MemObjSolUnmap(void *pv, size_t cb)
{
    if (SOL_IS_KRNL_ADDR(pv))
    {
        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, pv, cb);
    }
    else
    {
        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
        AssertPtr(pAddrSpace);
        as_rangelock(pAddrSpace);
        as_unmap(pAddrSpace, pv, cb);
        as_rangeunlock(pAddrSpace);
    }
}


/**
 * Lock down memory mappings for a virtual address.
 *
 * @param pv            Pointer to the memory to lock down.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
{
    /*
     * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
     */
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProc);
        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
        if (rc)
        {
            LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
            return VERR_LOCK_FAILED;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Unlock memory mappings for a virtual address.
 *
 * @param pv            Pointer to the locked memory.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 */
static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
{
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProcess);
        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
    }
}


/**
 * Maps a list of physical pages into user address space.
 *
 * @param pVirtAddr     Where to store the virtual address of the mapping.
 * @param fPageAccess   Page access rights (PROT_READ, PROT_WRITE,
 *                      PROT_EXEC).
 * @param paPhysAddrs   Array of physical addresses to pages.
 * @param cb            Size of memory being mapped.
 * @param cbPageSize    Page size of the mapping.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
{
    struct as     *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
    int            rc = VERR_INTERNAL_ERROR;
    SEGVBOX_CRARGS Args;

    Args.paPhysAddrs = paPhysAddrs;
    Args.fPageAccess = fPageAccess;
    Args.cbPageSize  = cbPageSize;

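    /* map_addr() merely picks a suitable, properly aligned user address;
     * as_map() then constructs the segment via the rtR0SegVBoxSolCreate
     * callback, handing it the physical page array packed above. */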
    as_rangelock(pAddrSpace);
    map_addr(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
    if (*pVirtAddr != NULL)
        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
    else
        rc = ENOMEM;
    as_rangeunlock(pAddrSpace);

    return RTErrConvertFromErrno(rc);
}


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PHYS:
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                if (pMemSolaris->fLargePage)
                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
                else
                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            if (pMemSolaris->fIndivPages)
                rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
            else
                rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            ddi_umem_free(pMemSolaris->Cookie);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            else
                AssertFailed();
            break;
        }

        case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
        default:
            AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_PAGE_MEMORY;
    }

    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Allocate physically low page-aligned memory. */
    uint64_t uPhysHi = _4G - 1;
    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_LOW_MEMORY;
    }
    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);
    return rtR0MemObjNativeAllocPhys(ppMem, cb, _4G - 1, PAGE_SIZE /* alignment */);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if HC_ARCH_BITS == 64
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    if (PhysHighest == NIL_RTHCPHYS)
    {
        uint64_t PhysAddr = UINT64_MAX;
        void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
        if (!pvPages)
        {
            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
            rtR0MemObjDelete(&pMemSolaris->Core);
            return VERR_NO_MEMORY;
        }
        Assert(PhysAddr != UINT64_MAX);
        Assert(!(PhysAddr & PAGE_OFFSET_MASK));

        pMemSolaris->Core.pv     = NULL;
        pMemSolaris->pvHandle    = pvPages;
        pMemSolaris->fIndivPages = true;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }
    else
    {
        /*
         * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
         * We fall back to using contig_alloc().
         */
        uint64_t PhysAddr = UINT64_MAX;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
        if (!pvMem)
        {
            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
            rtR0MemObjDelete(&pMemSolaris->Core);
            return VERR_NO_MEMORY;
        }
        Assert(PhysAddr != UINT64_MAX);
        Assert(!(PhysAddr & PAGE_OFFSET_MASK));

        pMemSolaris->Core.pv     = pvMem;
        pMemSolaris->pvHandle    = NULL;
        pMemSolaris->fIndivPages = false;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }

#else /* 32 bit: */
    return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
#endif
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    AssertMsgReturn(PhysHighest >= 16 * _1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Allocating one large page gets special treatment.
     */
    static uint32_t s_cbLargePage = UINT32_MAX;
    if (s_cbLargePage == UINT32_MAX)
    {
        if (page_num_pagesizes() > 1)
            ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
        else
            ASMAtomicWriteU32(&s_cbLargePage, 0);
    }

    uint64_t PhysAddr;
    if (   cb == s_cbLargePage
        && cb == uAlignment
        && PhysHighest == NIL_RTHCPHYS)
    {
        /*
         * Allocate one large page (backed by physically contiguous memory).
         */
        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
        if (RT_LIKELY(pvPages))
        {
            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
            pMemSolaris->Core.pv                = NULL;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = pvPages;
            pMemSolaris->fLargePage             = true;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate physically contiguous memory aligned as specified.
         */
        AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
        PhysAddr = PhysHighest;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
        if (RT_LIKELY(pvMem))
        {
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
            Assert(PhysAddr < PhysHighest);
            Assert(PhysAddr + cb <= PhysHighest);

            pMemSolaris->Core.pv                = pvMem;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = NULL;
            pMemSolaris->fLargePage             = false;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    rtR0MemObjDelete(&pMemSolaris->Core);
    return VERR_NO_CONT_MEMORY;
}


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* There is no allocation here, it needs to be mapped somewhere first. */
    pMemSolaris->Core.u.Phys.fAllocated   = false;
    pMemSolaris->Core.u.Phys.PhysBase     = Phys;
    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
    NOREF(fAccess);

    /* Create the locking object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down user pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
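    /* Note: as_fault() takes a single access type, so exec access takes
     * precedence over write, which takes precedence over read. */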
    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = R0Process;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    NOREF(fAccess);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down kernel pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    PRTR0MEMOBJSOL pMemSolaris;

    /*
     * Use xalloc.
     */
    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_NO_MEMORY;

    /* Create the object. */
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
    if (!pMemSolaris)
    {
        LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
        vmem_xfree(heap_arena, pv, cb);
        return VERR_NO_MEMORY;
    }

    pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process)
{
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    /* Fail if requested to do something we can't. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Use xalloc to get address space.
     */
    if (!cbSub)
        cbSub = pMemToMap->cb;
    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_MAP_FAILED;

    /*
     * Load the pages from the other object into it.
     */
    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
    if (fProt & RTMEM_PROT_READ)
        fAttr |= PROT_READ;
    if (fProt & RTMEM_PROT_EXEC)
        fAttr |= PROT_EXEC;
    if (fProt & RTMEM_PROT_WRITE)
        fAttr |= PROT_WRITE;
    fAttr |= HAT_NOSYNC;

    int rc = VINF_SUCCESS;
    size_t off = 0;
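    /* Establish the kernel mapping one 4K page at a time with hat_devload();
     * HAT_LOAD_LOCK keeps the translations locked in until the explicit
     * hat_unload() in rtR0MemObjSolUnmap() or in the error path below. */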
    while (off < cbSub)
    {
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
        pfn_t pfn = HCPhys >> PAGE_SHIFT;
        AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);

        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);

        /* Advance. */
        off += PAGE_SIZE;
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Create a memory object for the mapping.
         */
        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cbSub);
        if (pMemSolaris)
        {
            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }

        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
        rc = VERR_NO_MEMORY;
    }

    if (off)
        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
    vmem_xfree(heap_arena, pv, cbSub);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Fend off things we cannot do.
     */
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Get parameters from the source object.
     */
    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
    void          *pv               = pMemToMapSolaris->Core.pv;
    size_t         cb               = pMemToMapSolaris->Core.cb;
    size_t         cPages           = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;

    /*
     * Create the mapping object.
     */
    PRTR0MEMOBJSOL pMemSolaris;
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    int rc = VINF_SUCCESS;
    uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
    if (RT_LIKELY(paPhysAddrs))
    {
        /*
         * Prepare the pages for mapping according to type.
         */
        if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
            && pMemToMapSolaris->fIndivPages)
        {
            page_t **ppPages = pMemToMapSolaris->pvHandle;
            AssertPtr(ppPages);
            for (size_t iPage = 0; iPage < cPages; iPage++)
                paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(ppPages[iPage]);
        }
        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
                 && pMemToMapSolaris->fLargePage)
        {
            RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
            for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
                paPhysAddrs[iPage] = Phys;
        }
        else
        {
            /*
             * Have kernel mapping, just translate virtual to physical.
             */
            AssertPtr(pv);
            rc = VINF_SUCCESS;
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pv);
                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
                {
                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
                    rc = VERR_MAP_FAILED;
                    break;
                }
                pv = (void *)((uintptr_t)pv + PAGE_SIZE);
            }
        }
        if (RT_SUCCESS(rc))
        {
            unsigned fPageAccess = PROT_READ;
            if (fProt & RTMEM_PROT_WRITE)
                fPageAccess |= PROT_WRITE;
            if (fProt & RTMEM_PROT_EXEC)
                fPageAccess |= PROT_EXEC;

            /*
             * Perform the actual mapping.
             */
            caddr_t UserAddr = NULL;
            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
            if (RT_SUCCESS(rc))
            {
                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
                pMemSolaris->Core.pv                  = UserAddr;

                *ppMem = &pMemSolaris->Core;
                kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
                return VINF_SUCCESS;
            }

            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
        }

        rc = VERR_MAP_FAILED;
        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemSolaris->Core);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
            if (   pMemSolaris->Core.u.Phys.fAllocated
                || !pMemSolaris->fIndivPages)
            {
                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
                return rtR0MemObjSolVirtToPhys(pb);
            }
            page_t **ppPages = pMemSolaris->pvHandle;
            return rtR0MemObjSolPagePhys(ppPages[iPage]);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        {
            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return rtR0MemObjSolVirtToPhys(pb);
        }

        /*
         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
         */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
            AssertFailed(); /* handled by the caller */
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}