VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c@91482

Last change on this file since 91482 was 91482, checked in by vboxsync, 3 years ago

IPRT/memobj: Passing pszTag around...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision

File size: 41.5 KB
/* $Id: memobj-r0drv-solaris.c 91482 2021-09-30 00:12:26Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Solaris.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/memobj.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"
#include "memobj-r0drv-solaris.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The Solaris version of the memory object structure.
 */
typedef struct RTR0MEMOBJSOL
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to kernel memory cookie. */
    ddi_umem_cookie_t   Cookie;
    /** Shadow locked pages. */
    void               *pvHandle;
    /** Access during locking. */
    int                 fAccess;
    /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS
     *  allocation. */
    bool                fLargePage;
    /** Whether we have individual pages or a kernel-mapped virtual memory block in
     *  an RTR0MEMOBJTYPE_PHYS_NC allocation. */
    bool                fIndivPages;
} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
static vnode_t      g_PageVnode;
static kmutex_t     g_OffsetMtx;
static u_offset_t   g_offPage;

static vnode_t      g_LargePageVnode;
static kmutex_t     g_LargePageOffsetMtx;
static u_offset_t   g_offLargePage;
static bool         g_fLargePageNoReloc;


/**
 * Returns the physical address for a virtual address.
 *
 * @param   pv      The virtual address.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    struct hat *pHat         = NULL;
    pfn_t       PageFrameNum = 0;
    uintptr_t   uVirtAddr    = (uintptr_t)pv;

    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
    return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
}

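/*
 * Illustrative sketch (not part of the original file): looking up the physical
 * address backing a kernel buffer with rtR0MemObjSolVirtToPhys(). The
 * kmem_zalloc() scratch buffer is an assumption made purely for illustration.
 */
#if 0 /* example only */
static void rtR0MemObjSolExampleVirtToPhys(void)
{
    void    *pv    = kmem_zalloc(PAGE_SIZE, KM_SLEEP);  /* kernel address, so kas.a_hat is used above */
    uint64_t uPhys = rtR0MemObjSolVirtToPhys(pv);        /* (PFN << PAGE_SHIFT) | offset-in-page */
    NOREF(uPhys);
    kmem_free(pv, PAGE_SIZE);
}
#endif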

/**
 * Returns the physical address for a page.
 *
 * @param   pPage   Pointer to the page.
 *
 * @returns The physical address of the page.
 */
static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
{
    AssertPtr(pPage);
    pfn_t PageFrameNum = page_pptonum(pPage);
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
    return (uint64_t)PageFrameNum << PAGE_SHIFT;
}


/**
 * Allocates one page.
 *
 * @param   virtAddr    The virtual address to which this page may be mapped in
 *                      the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t offPage;
    seg_t      KernelSeg;

    /*
     * 16777215 terabytes of total memory for all VMs or
     * restart 8000 1GB VMs 2147483 times until wraparound!
     */
    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage   = g_offPage;
    mutex_exit(&g_OffsetMtx);

    KernelSeg.s_as = &kas;
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent this page from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared lock
         * to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}


/**
 * Destroys an allocated page.
 *
 * @param   pPage   Pointer to the page to be destroyed.
 * @remarks This function expects the page in @a pPage to be shared locked.
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need to exclusive lock the pages before freeing them; if upgrading the shared lock
     * to exclusive fails, drop the page lock and look it up from the hash. Record the page
     * offset before we drop the page lock as we cannot touch any page_t members once the
     * lock is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the pages as PG_NORELOC we should only get back the exact page always.
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}

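/*
 * Illustrative sketch (not part of the original file): a single-page round
 * trip through the two helpers above, mirroring the page_resv()/page_unresv()
 * accounting the real callers perform. The error handling is an assumption
 * made for illustration only.
 */
#if 0 /* example only */
static int rtR0MemObjSolExamplePageRoundTrip(void)
{
    if (!page_resv(1, KM_NOSLEEP))                   /* reserve availrmem for one page */
        return VERR_NO_MEMORY;
    page_t *pPage = rtR0MemObjSolPageAlloc(NULL /* virtAddr */);
    if (!pPage)
    {
        page_unresv(1);
        return VERR_NO_MEMORY;
    }
    uint64_t uPhys = rtR0MemObjSolPagePhys(pPage);   /* physical address of the page */
    NOREF(uPhys);
    rtR0MemObjSolPageDestroy(pPage);                 /* expects the shared lock taken by the alloc */
    page_unresv(1);
    return VINF_SUCCESS;
}
#endif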

/* Currently not used on 32-bit, define it to shut up gcc. */
#if HC_ARCH_BITS == 64
/**
 * Allocates physical, non-contiguous memory of pages.
 *
 * @param   puPhys  Where to store the physical address of the first page.
 *                  Optional, can be NULL.
 * @param   cb      The size of the allocation.
 *
 * @returns Array of allocated pages, NULL on failure.
 */
static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
{
    /*
     * VM1:
     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     * The cachelist is not really free pages, but when memory is exhausted they'll be moved to the
     * free lists; it's the total of the free+cache lists that we see in the 'free' column of vmstat.
     *
     * VM2:
     * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
     */

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        size_t   cbPages = cPages * sizeof(page_t *);
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            /*
             * Get pages from kseg; the 'virtAddr' here is only for colouring, but unfortunately
             * we don't yet have the 'virtAddr' to which this memory may be mapped.
             */
            caddr_t virtAddr = 0;
            for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
            {
                /*
                 * Get a page from the free list locked exclusively. The page will be named (hashed in)
                 * and we rely on it during free. The page we get will be shared locked to prevent the page
                 * from being relocated.
                 */
                page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
                if (RT_UNLIKELY(!pPage))
                {
                    /*
                     * No page found, release whatever pages we grabbed so far.
                     */
                    for (size_t k = 0; k < i; k++)
                        rtR0MemObjSolPageDestroy(ppPages[k]);
                    kmem_free(ppPages, cbPages);
                    page_unresv(cPages);
                    return NULL;
                }

                ppPages[i] = pPage;
            }

            if (puPhys)
                *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
            return ppPages;
        }

        page_unresv(cPages);
    }

    return NULL;
}
#endif /* HC_ARCH_BITS == 64 */


/**
 * Frees the allocated pages.
 *
 * @param   ppPages     Pointer to the page list.
 * @param   cb          Size of the allocation.
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    size_t cPages  = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
        rtR0MemObjSolPageDestroy(ppPages[iPage]);

    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}

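/*
 * Illustrative sketch (not part of the original file): allocating and freeing
 * a non-contiguous batch of pages with the two helpers above. The size and
 * error handling are assumptions made for illustration only.
 */
#if 0 /* example only */
static int rtR0MemObjSolExamplePagesRoundTrip(void)
{
    uint64_t uPhysFirst = UINT64_MAX;
    page_t **ppPages = rtR0MemObjSolPagesAlloc(&uPhysFirst, 16 * PAGE_SIZE);
    if (!ppPages)
        return VERR_NO_MEMORY;
    Assert(!(uPhysFirst & PAGE_OFFSET_MASK));        /* first page is page aligned */
    /* ... map or otherwise use the pages here ... */
    rtR0MemObjSolPagesFree(ppPages, 16 * PAGE_SIZE); /* also undoes the page_resv() */
    return VINF_SUCCESS;
}
#endif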

/**
 * Allocates one large page.
 *
 * @param   puPhys          Where to store the physical address of the allocated
 *                          page. Optional, can be NULL.
 * @param   cbLargePage     Size of the large page.
 *
 * @returns Pointer to a list of pages that cover the large page, NULL on
 *          failure.
 */
static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
{
    /*
     * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
     * fragmentation on systems that support it.
     */
    static bool fPageNoRelocChecked = false;
    if (fPageNoRelocChecked == false)
    {
        fPageNoRelocChecked = true;
        g_fLargePageNoReloc = false;
        if (   g_pfnrtR0Sol_page_noreloc_supported
            && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
        {
            g_fLargePageNoReloc = true;
        }
    }

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t     cPages  = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t     cbPages = cPages * sizeof(page_t *);
    u_offset_t offPage = 0;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            mutex_enter(&g_LargePageOffsetMtx);
            AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
            g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
            offPage        = g_offLargePage;
            mutex_exit(&g_LargePageOffsetMtx);

            seg_t KernelSeg;
            KernelSeg.s_as = &kas;
            page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
                                                     PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
                                                     0 /* vaddr */, NULL /* locality group */);
            if (pRootPage)
            {
                /*
                 * Split it into sub-pages, downgrade each page to a shared lock to prevent page relocation.
                 */
                page_t *pPageList = pRootPage;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    page_t *pPage = pPageList;
                    AssertPtr(pPage);
                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
                                                                 (int)pPage->p_szc, (int)pRootPage->p_szc));

                    /*
                     * Lock the page into memory "long term". This prevents callers of page_try_demote_pages() (such as the
                     * pageout scanner) from demoting the large page into smaller pages while we temporarily release the
                     * exclusive lock (during free). We pass "0, 1" since we've already accounted for availrmem during
                     * page_resv().
                     */
                    page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);

                    page_sub(&pPageList, pPage);
                    page_io_unlock(pPage);
                    page_downgrade(pPage);
                    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

                    ppPages[iPage] = pPage;
                }
                Assert(pPageList == NULL);
                Assert(ppPages[0] == pRootPage);

                uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
                AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
                if (puPhys)
                    *puPhys = uPhys;
                return ppPages;
            }

            /*
             * Don't roll back g_offLargePage in case of failure (race condition), we have plenty of offset space.
             * The offset must be unique (for the same vnode) or we'll encounter panics on page_create_va_large().
             */
            kmem_free(ppPages, cbPages);
        }

        page_unresv(cPages);
    }
    return NULL;
}


/**
 * Frees the large page.
 *
 * @param   ppPages         Pointer to the list of small pages that cover the
 *                          large page.
 * @param   cbLargePage     Size of the allocation (i.e. size of the large
 *                          page).
 */
static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
{
    Assert(ppPages);
    Assert(cbLargePage > PAGE_SIZE);

    bool    fDemoted   = false;
    size_t  cPages     = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t  cbPages    = cPages * sizeof(page_t *);
    page_t *pPageList  = ppPages[0];

    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * We need the pages exclusively locked, try upgrading the shared lock.
         * If it fails, drop the shared page lock (cannot access any page_t members once this is done)
         * and lookup the page from the page hash locking it exclusively.
         */
        page_t    *pPage   = ppPages[iPage];
        u_offset_t offPage = pPage->p_offset;
        int rc = page_tryupgrade(pPage);
        if (!rc)
        {
            page_unlock(pPage);
            page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
            AssertRelease(pFoundPage);

            if (g_fLargePageNoReloc)
            {
                /*
                 * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
                 */
                AssertReleaseMsg(pFoundPage == pPage,
                                 ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
                                  pFoundPage, pPage));
            }

            /*
             * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
             * could possibly demote the large page to _4K pages between our call to page_unlock() and page_lookup().
             */
            if (page_get_pagecnt(pFoundPage->p_szc) == 1)   /* Base size of only _4K associated with this page. */
                fDemoted = true;
            pPage = pFoundPage;
            ppPages[iPage] = pFoundPage;
        }
        Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
        page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    }

    if (fDemoted)
    {
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
            page_destroy(ppPages[iPage], 0 /* move it to the free list */);
        }
    }
    else
    {
        /*
         * Although the list links between the adjacent pages were shredded during allocation,
         * page_destroy_pages() works on adjacent pages via array increments. So this does
         * indeed free all the pages.
         */
        AssertPtr(pPageList);
        page_destroy_pages(pPageList);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}

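/*
 * Illustrative sketch (not part of the original file): a large-page round
 * trip. Page-size index 1 is assumed to be the smallest large page (typically
 * _2M on x86/amd64, as noted further down in this file).
 */
#if 0 /* example only */
static int rtR0MemObjSolExampleLargePageRoundTrip(void)
{
    size_t const cbLargePage = page_get_pagesize(1);
    uint64_t     uPhys       = UINT64_MAX;
    page_t     **ppPages     = rtR0MemObjSolLargePageAlloc(&uPhys, cbLargePage);
    if (!ppPages)
        return VERR_NO_MEMORY;
    Assert(!(uPhys & (cbLargePage - 1)));            /* naturally aligned by the allocator */
    rtR0MemObjSolLargePageFree(ppPages, cbLargePage);
    return VINF_SUCCESS;
}
#endif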

/**
 * Unmaps kernel/user-space mapped memory.
 *
 * @param   pv      Pointer to the mapped memory block.
 * @param   cb      Size of the memory block.
 */
static void rtR0MemObjSolUnmap(void *pv, size_t cb)
{
    if (SOL_IS_KRNL_ADDR(pv))
    {
        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, pv, cb);
    }
    else
    {
        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
        AssertPtr(pAddrSpace);
        as_rangelock(pAddrSpace);
        as_unmap(pAddrSpace, pv, cb);
        as_rangeunlock(pAddrSpace);
    }
}


/**
 * Locks down memory mappings for a virtual address.
 *
 * @param   pv              Pointer to the memory to lock down.
 * @param   cb              Size of the memory block.
 * @param   fPageAccess     Page access rights (S_READ, S_WRITE, S_EXEC).
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
{
    /*
     * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
     */
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProc);
        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
        if (rc)
        {
            LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
            return VERR_LOCK_FAILED;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Unlocks memory mappings for a virtual address.
 *
 * @param   pv              Pointer to the locked memory.
 * @param   cb              Size of the memory block.
 * @param   fPageAccess     Page access rights (S_READ, S_WRITE, S_EXEC).
 */
static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
{
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProcess);
        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
    }
}

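/*
 * Illustrative sketch (not part of the original file): pinning a user buffer
 * for the duration of an operation. The calling context (a thread of the
 * owning process) and write access are assumptions made for illustration.
 */
#if 0 /* example only */
static int rtR0MemObjSolExampleLockRange(void *pvUser, size_t cb)
{
    int rc = rtR0MemObjSolLock(pvUser, cb, S_WRITE);  /* faults the pages in and F_SOFTLOCKs them */
    if (RT_FAILURE(rc))
        return rc;
    /* ... safely access the locked range here ... */
    rtR0MemObjSolUnlock(pvUser, cb, S_WRITE);         /* must pass the same access used for locking */
    return VINF_SUCCESS;
}
#endif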

/**
 * Maps a list of physical pages into user address space.
 *
 * @param   pVirtAddr       Where to store the virtual address of the mapping.
 * @param   fPageAccess     Page access rights (PROT_READ, PROT_WRITE,
 *                          PROT_EXEC).
 * @param   paPhysAddrs     Array of physical addresses to pages.
 * @param   cb              Size of memory being mapped.
 * @param   cbPageSize      Size of the pages being mapped.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
{
    struct as     *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
    int            rc;
    SEGVBOX_CRARGS Args;

    Args.paPhysAddrs = paPhysAddrs;
    Args.fPageAccess = fPageAccess;
    Args.cbPageSize  = cbPageSize;

    as_rangelock(pAddrSpace);
    if (g_frtSolOldMapAddr)
        g_rtSolMapAddr.u.pfnSol_map_addr_old(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
    else
        g_rtSolMapAddr.u.pfnSol_map_addr(pVirtAddr, cb, 0 /* offset */, MAP_SHARED);
    if (*pVirtAddr != NULL)
        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
    else
        rc = ENOMEM;
    as_rangeunlock(pAddrSpace);

    return RTErrConvertFromErrno(rc);
}

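/*
 * Illustrative sketch (not part of the original file): mapping a single
 * physical page into the calling process with the helper above, then tearing
 * the mapping down again. The input physical address is an assumption.
 */
#if 0 /* example only */
static int rtR0MemObjSolExampleUserMapOnePage(uint64_t uPhys)
{
    uint64_t aPhysAddrs[1] = { uPhys };
    caddr_t  UserAddr      = NULL;
    int rc = rtR0MemObjSolUserMap(&UserAddr, PROT_READ | PROT_WRITE, aPhysAddrs, PAGE_SIZE, PAGE_SIZE);
    if (RT_SUCCESS(rc))
        rtR0MemObjSolUnmap(UserAddr, PAGE_SIZE);      /* undoes the as_map() in the user address space */
    return rc;
}
#endif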

DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PHYS:
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                if (pMemSolaris->fLargePage)
                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
                else
                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            if (pMemSolaris->fIndivPages)
                rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
            else
                rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            ddi_umem_free(pMemSolaris->Cookie);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            else
                AssertFailed();
            break;
        }

        case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
        default:
            AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb, NULL);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_PAGE_MEMORY;
    }

    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                           const char *pszTag)
{
    return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb, NULL);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Allocate physically low page-aligned memory. */
    uint64_t uPhysHi = _4G - 1;
    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_LOW_MEMORY;
    }
    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);
    return rtR0MemObjNativeAllocPhys(ppMem, cb, _4G - 1, PAGE_SIZE /* alignment */, NULL);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
#if HC_ARCH_BITS == 64
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    if (PhysHighest == NIL_RTHCPHYS)
    {
        uint64_t PhysAddr = UINT64_MAX;
        void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
        if (!pvPages)
        {
            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%zu.\n", cb));
            rtR0MemObjDelete(&pMemSolaris->Core);
            return VERR_NO_MEMORY;
        }
        Assert(PhysAddr != UINT64_MAX);
        Assert(!(PhysAddr & PAGE_OFFSET_MASK));

        pMemSolaris->Core.pv     = NULL;
        pMemSolaris->pvHandle    = pvPages;
        pMemSolaris->fIndivPages = true;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }
    else
    {
        /*
         * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
         * We fall back to using contig_alloc().
         */
        uint64_t PhysAddr = UINT64_MAX;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
        if (!pvMem)
        {
            LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%zu PhysHighest=%RHp.\n", cb, PhysHighest));
            rtR0MemObjDelete(&pMemSolaris->Core);
            return VERR_NO_MEMORY;
        }
        Assert(PhysAddr != UINT64_MAX);
        Assert(!(PhysAddr & PAGE_OFFSET_MASK));

        pMemSolaris->Core.pv     = pvMem;
        pMemSolaris->pvHandle    = NULL;
        pMemSolaris->fIndivPages = false;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }

#else /* 32 bit: */
    return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
#endif
}

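/*
 * Illustrative sketch (not part of the original file): how a ring-0 caller
 * might obtain page-by-page physical memory through the public IPRT API that
 * lands in rtR0MemObjNativeAllocPhysNC() above. The size is an assumption.
 */
#if 0 /* example only */
static int rtR0MemObjSolExampleAllocPhysNC(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPhysNC(&hMemObj, 16 * PAGE_SIZE, NIL_RTHCPHYS); /* takes the individual-pages path */
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS Phys0 = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        NOREF(Phys0);
        RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    }
    return rc;
}
#endif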

DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
                                          const char *pszTag)
{
    AssertMsgReturn(PhysHighest >= 16 * _1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Allocating one large page gets special treatment.
     */
    static uint32_t s_cbLargePage = UINT32_MAX;
    if (s_cbLargePage == UINT32_MAX)
    {
        if (page_num_pagesizes() > 1)
            ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
        else
            ASMAtomicWriteU32(&s_cbLargePage, 0);
    }

    uint64_t PhysAddr;
    if (   cb == s_cbLargePage
        && cb == uAlignment
        && PhysHighest == NIL_RTHCPHYS)
    {
        /*
         * Allocate one large page (backed by physically contiguous memory).
         */
        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
        if (RT_LIKELY(pvPages))
        {
            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
            pMemSolaris->Core.pv                = NULL;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = pvPages;
            pMemSolaris->fLargePage             = true;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate physically contiguous memory aligned as specified.
         */
        AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
        PhysAddr = PhysHighest;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
        if (RT_LIKELY(pvMem))
        {
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
            Assert(PhysAddr < PhysHighest);
            Assert(PhysAddr + cb <= PhysHighest);

            pMemSolaris->Core.pv                = pvMem;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = NULL;
            pMemSolaris->fLargePage             = false;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    rtR0MemObjDelete(&pMemSolaris->Core);
    return VERR_NO_CONT_MEMORY;
}

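/*
 * Illustrative sketch (not part of the original file): requesting one 2MB
 * large page through the public IPRT API. cb == uAlignment == _2M with no
 * physical-address ceiling selects the large-page path above; other inputs
 * fall through to the contiguous allocation path in the same function.
 */
#if 0 /* example only */
static int rtR0MemObjSolExampleAllocLargePhys(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPhysEx(&hMemObj, _2M, NIL_RTHCPHYS, _2M /* uAlignment */);
    if (RT_SUCCESS(rc))
        RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    return rc;
}
#endif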

DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
                                          const char *pszTag)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* There is no allocation here, it needs to be mapped somewhere first. */
    pMemSolaris->Core.u.Phys.fAllocated   = false;
    pMemSolaris->Core.u.Phys.PhysBase     = Phys;
    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process, const char *pszTag)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
    NOREF(fAccess);

    /* Create the locking object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK,
                                                               (void *)R3Ptr, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down user pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = R0Process;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
{
    NOREF(fAccess);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down kernel pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
                                              const char *pszTag)
{
    PRTR0MEMOBJSOL pMemSolaris;

    /*
     * Use xalloc.
     */
    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_NO_MEMORY;

    /* Create the object. */
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb, pszTag);
    if (!pMemSolaris)
    {
        LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
        vmem_xfree(heap_arena, pv, cb);
        return VERR_NO_MEMORY;
    }

    pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process, const char *pszTag)
{
    RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
    /* Fail if requested to do something we can't. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Use xalloc to get address space.
     */
    if (!cbSub)
        cbSub = pMemToMap->cb;
    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_MAP_FAILED;

    /*
     * Load the pages from the other object into it.
     */
    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
    if (fProt & RTMEM_PROT_READ)
        fAttr |= PROT_READ;
    if (fProt & RTMEM_PROT_EXEC)
        fAttr |= PROT_EXEC;
    if (fProt & RTMEM_PROT_WRITE)
        fAttr |= PROT_WRITE;
    fAttr |= HAT_NOSYNC;

    int    rc  = VINF_SUCCESS;
    size_t off = 0;
    while (off < cbSub)
    {
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
        pfn_t pfn = HCPhys >> PAGE_SHIFT;
        AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);

        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);

        /* Advance. */
        off += PAGE_SIZE;
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Create a memory object for the mapping.
         */
        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING,
                                                                   pv, cbSub, pszTag);
        if (pMemSolaris)
        {
            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }

        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
        rc = VERR_NO_MEMORY;
    }

    if (off)
        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
    vmem_xfree(heap_arena, pv, cbSub);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub,
                                        const char *pszTag)
{
    /*
     * Fend off things we cannot do.
     */
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Get parameters from the source object and offSub/cbSub.
     */
    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
    uint8_t       *pb               = pMemToMapSolaris->Core.pv ? (uint8_t *)pMemToMapSolaris->Core.pv + offSub : NULL;
    size_t const   cb               = cbSub ? cbSub : pMemToMapSolaris->Core.cb;
    size_t const   cPages           = cb >> PAGE_SHIFT;
    Assert(!offSub || cbSub);
    Assert(!(cb & PAGE_OFFSET_MASK));

    /*
     * Create the mapping object.
     */
    PRTR0MEMOBJSOL pMemSolaris;
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pb, cb, pszTag);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Gather the physical addresses of the pages to be mapped.
     */
    int       rc          = VINF_SUCCESS;
    uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
    if (RT_LIKELY(paPhysAddrs))
    {
        if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
            && pMemToMapSolaris->fIndivPages)
        {
            /* Translate individual page_t to physical addresses. */
            page_t **papPages = pMemToMapSolaris->pvHandle;
            AssertPtr(papPages);
            papPages += offSub >> PAGE_SHIFT;
            for (size_t iPage = 0; iPage < cPages; iPage++)
                paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(papPages[iPage]);
        }
        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
                 && pMemToMapSolaris->fLargePage)
        {
            /* Split up the large page into page-sized chunks. */
            RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
            Phys += offSub;
            for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
                paPhysAddrs[iPage] = Phys;
        }
        else
        {
            /* Have kernel mapping, just translate virtual to physical. */
            AssertPtr(pb);
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pb);
                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
                {
                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
                    rc = VERR_MAP_FAILED;
                    break;
                }
                pb += PAGE_SIZE;
            }
        }
        if (RT_SUCCESS(rc))
        {
            /*
             * Perform the actual mapping.
             */
            unsigned fPageAccess = PROT_READ;
            if (fProt & RTMEM_PROT_WRITE)
                fPageAccess |= PROT_WRITE;
            if (fProt & RTMEM_PROT_EXEC)
                fPageAccess |= PROT_EXEC;

            caddr_t UserAddr = NULL;
            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
            if (RT_SUCCESS(rc))
            {
                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
                pMemSolaris->Core.pv                  = UserAddr;

                *ppMem = &pMemSolaris->Core;
                kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
                return VINF_SUCCESS;
            }

            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
        }

        rc = VERR_MAP_FAILED;
        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemSolaris->Core);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
            if (   pMemSolaris->Core.u.Phys.fAllocated
                || !pMemSolaris->fIndivPages)
            {
                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
                return rtR0MemObjSolVirtToPhys(pb);
            }
            page_t **ppPages = pMemSolaris->pvHandle;
            return rtR0MemObjSolPagePhys(ppPages[iPage]);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        {
            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return rtR0MemObjSolVirtToPhys(pb);
        }

        /*
         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
         */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
            AssertFailed(); /* handled by the caller */
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}
