VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c@ 91446

Last change on this file since 91446 was 91446, checked in by vboxsync, 3 years ago

IPRT/memobj: Adding RTR0MemObjAllocLarge for speeding up large page allocations. bugref:5324

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 40.9 KB
Line 
1/* $Id: memobj-r0drv-solaris.c 91446 2021-09-28 19:53:25Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Solaris.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-solaris-kernel.h"
32#include "internal/iprt.h"
33#include <iprt/memobj.h>
34
35#include <iprt/asm.h>
36#include <iprt/assert.h>
37#include <iprt/err.h>
38#include <iprt/log.h>
39#include <iprt/mem.h>
40#include <iprt/param.h>
41#include <iprt/process.h>
42#include "internal/memobj.h"
43#include "memobj-r0drv-solaris.h"
44
45
46/*********************************************************************************************************************************
47* Defined Constants And Macros *
48*********************************************************************************************************************************/
/** Evaluates to true if @a vx is a kernel-space address (at or above kernelbase). */
#define SOL_IS_KRNL_ADDR(vx) ((uintptr_t)(vx) >= kernelbase)
50
51
52/*********************************************************************************************************************************
53* Structures and Typedefs *
54*********************************************************************************************************************************/
/**
 * The Solaris version of the memory object structure.
 */
typedef struct RTR0MEMOBJSOL
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** Pointer to kernel memory cookie (from ddi_umem_alloc, used by ddi_umem_free). */
    ddi_umem_cookie_t Cookie;
    /** Shadow locked pages. */
    void *pvHandle;
    /** Access during locking (S_READ, S_WRITE or S_EXEC; see rtR0MemObjSolLock/Unlock). */
    int fAccess;
    /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS
     * allocation. */
    bool fLargePage;
    /** Whether we have individual pages or a kernel-mapped virtual memory block in
     * an RTR0MEMOBJTYPE_PHYS_NC allocation. */
    bool fIndivPages;
} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;
75
76
77/*********************************************************************************************************************************
78* Global Variables *
79*********************************************************************************************************************************/
/** Vnode used to name (hash) individually allocated 4K pages. */
static vnode_t g_PageVnode;
/** Protects g_offPage. */
static kmutex_t g_OffsetMtx;
/** Monotonically increasing offset used to give each page a unique name on g_PageVnode. */
static u_offset_t g_offPage;

/** Vnode used to name (hash) large-page allocations. */
static vnode_t g_LargePageVnode;
/** Protects g_offLargePage. */
static kmutex_t g_LargePageOffsetMtx;
/** Monotonically increasing offset used to name large pages on g_LargePageVnode. */
static u_offset_t g_offLargePage;
/** Whether PG_NORELOC is supported for large pages (detected lazily, see rtR0MemObjSolLargePageAlloc). */
static bool g_fLargePageNoReloc;
88
89
/**
 * Returns the physical address for a virtual address.
 *
 * @param pv The virtual address.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    struct hat *pHat = NULL;
    pfn_t PageFrameNum = 0;
    uintptr_t uVirtAddr = (uintptr_t)pv;

    /* Kernel addresses translate via the kernel address space HAT, user
       addresses via the current process' HAT. */
    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    /* Look up the PFN for the page base, then re-attach the in-page offset. */
    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
    return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
}
116
117
118/**
119 * Returns the physical address for a page.
120 *
121 * @param pPage Pointer to the page.
122 *
123 * @returns The physical address for a page.
124 */
125static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
126{
127 AssertPtr(pPage);
128 pfn_t PageFrameNum = page_pptonum(pPage);
129 AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n"));
130 return (uint64_t)PageFrameNum << PAGE_SHIFT;
131}
132
133
/**
 * Allocates one page.
 *
 * @param virtAddr The virtual address to which this page maybe mapped in
 *                 the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t offPage;
    seg_t KernelSeg;

    /*
     * Reserve a unique offset to name the page on g_PageVnode; the offset
     * space is 64-bit, so wraparound is not a practical concern:
     * 16777215 terabytes of total memory for all VMs or
     * restart 8000 1GB VMs 2147483 times until wraparound!
     */
    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage = g_offPage;
    mutex_exit(&g_OffsetMtx);

    /* PG_WAIT: sleep until a page is available; PG_NORELOC: the page must not
       be relocated, so page_lookup() during free returns this exact page. */
    KernelSeg.s_as = &kas;
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent this page from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared lock
         * to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}
174
175
/**
 * Destroys an allocated page.
 *
 * @param pPage Pointer to the page to be destroyed.
 * @remarks This function expects page in @c pPage to be shared locked.
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need to exclusive lock the pages before freeing them, if upgrading the shared lock to exclusive fails,
     * drop the page lock and look it up from the hash. Record the page offset before we drop the page lock as
     * we cannot touch any page_t members once the lock is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        /* Upgrade failed: drop the shared lock and re-acquire exclusively via the vnode hash. */
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the pages as PG_NORELOC we should only get back the exact page always.
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    /* Undo the long-term lock taken in rtR0MemObjSolPageAlloc before destroying. */
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}
209
210
/* Currently not used on 32-bits, define it to shut up gcc. */
#if HC_ARCH_BITS == 64
/**
 * Allocates physical, non-contiguous memory of pages.
 *
 * @param puPhys Where to store the physical address of first page. Optional,
 *               can be NULL.
 * @param cb     The size of the allocation.
 *
 * @return Array of allocated pages, NULL on failure. The caller owns the
 *         array and must release it with rtR0MemObjSolPagesFree().
 */
static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
{
    /*
     * VM1:
     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     * The cachelist is not really free pages but when memory is exhausted they'll be moved to the
     * free lists, it's the total of the free+cache list that we see on the 'free' column in vmstat.
     *
     * VM2:
     * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
     */

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        size_t cbPages = cPages * sizeof(page_t *);
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            /*
             * Get pages from kseg, the 'virtAddr' here is only for colouring but unfortunately
             * we don't yet have the 'virtAddr' to which this memory may be mapped.
             */
            caddr_t virtAddr = 0;
            for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
            {
                /*
                 * Get a page from the free list locked exclusively. The page will be named (hashed in)
                 * and we rely on it during free. The page we get will be shared locked to prevent the page
                 * from being relocated.
                 */
                page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
                if (RT_UNLIKELY(!pPage))
                {
                    /*
                     * No page found, release whatever pages we grabbed so far.
                     */
                    for (size_t k = 0; k < i; k++)
                        rtR0MemObjSolPageDestroy(ppPages[k]);
                    kmem_free(ppPages, cbPages);
                    page_unresv(cPages);
                    return NULL;
                }

                ppPages[i] = pPage;
            }

            if (puPhys)
                *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
            return ppPages;
        }

        /* Page-array allocation failed: undo the reservation. */
        page_unresv(cPages);
    }

    return NULL;
}
#endif /* HC_ARCH_BITS == 64 */
284
285
/**
 * Frees pages allocated by rtR0MemObjSolPagesAlloc().
 *
 * @param ppPages Pointer to the page list.
 * @param cb      Size of the allocation in bytes (same value passed to
 *                rtR0MemObjSolPagesAlloc).
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
        rtR0MemObjSolPageDestroy(ppPages[iPage]);

    /* Free the pointer array and drop the page_resv() reservation. */
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}
302
303
/**
 * Allocates one large page.
 *
 * @param puPhys      Where to store the physical address of the allocated
 *                    page. Optional, can be NULL.
 * @param cbLargePage Size of the large page.
 *
 * @returns Pointer to a list of pages that cover the large page, NULL on
 *          failure.
 */
static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
{
    /*
     * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
     * fragmentation on systems that support it.
     *
     * NOTE(review): this lazy check-once pattern is not thread-safe; it assumes
     * concurrent first-time callers are benign (both writers store the same
     * values) -- confirm callers serialize the first large-page allocation.
     */
    static bool fPageNoRelocChecked = false;
    if (fPageNoRelocChecked == false)
    {
        fPageNoRelocChecked = true;
        g_fLargePageNoReloc = false;
        if (   g_pfnrtR0Sol_page_noreloc_supported
            && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
        {
            g_fLargePageNoReloc = true;
        }
    }

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    u_offset_t offPage = 0;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            /* Reserve a large-page-aligned, unique offset to name the large page on the vnode. */
            mutex_enter(&g_LargePageOffsetMtx);
            AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
            g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
            offPage = g_offLargePage;
            mutex_exit(&g_LargePageOffsetMtx);

            seg_t KernelSeg;
            KernelSeg.s_as = &kas;
            page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
                                                     PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
                                                     0 /* vaddr */,NULL /* locality group */);
            if (pRootPage)
            {
                /*
                 * Split it into sub-pages, downgrade each page to a shared lock to prevent page relocation.
                 */
                page_t *pPageList = pRootPage;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    page_t *pPage = pPageList;
                    AssertPtr(pPage);
                    /* Constituent pages must be physically consecutive and share the root's size code. */
                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
                                                                 (int)pPage->p_szc, (int)pRootPage->p_szc));

                    /*
                     * Lock the page into memory "long term". This prevents callers of page_try_demote_pages() (such as the
                     * pageout scanner) from demoting the large page into smaller pages while we temporarily release the
                     * exclusive lock (during free). We pass "0, 1" since we've already accounted for availrmem during
                     * page_resv().
                     */
                    page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);

                    page_sub(&pPageList, pPage);
                    page_io_unlock(pPage);
                    page_downgrade(pPage);
                    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

                    ppPages[iPage] = pPage;
                }
                Assert(pPageList == NULL);
                Assert(ppPages[0] == pRootPage);

                uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
                AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
                if (puPhys)
                    *puPhys = uPhys;
                return ppPages;
            }

            /*
             * Don't restore offPrev in case of failure (race condition), we have plenty of offset space.
             * The offset must be unique (for the same vnode) or we'll encounter panics on page_create_va_large().
             */
            kmem_free(ppPages, cbPages);
        }

        page_unresv(cPages);
    }
    return NULL;
}
406
407
/**
 * Frees the large page.
 *
 * @param ppPages     Pointer to the list of small pages that cover the
 *                    large page.
 * @param cbLargePage Size of the allocation (i.e. size of the large
 *                    page).
 */
static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
{
    Assert(ppPages);
    Assert(cbLargePage > PAGE_SIZE);

    bool fDemoted = false;
    size_t cPages = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    page_t *pPageList = ppPages[0];

    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * We need the pages exclusively locked, try upgrading the shared lock.
         * If it fails, drop the shared page lock (cannot access any page_t members once this is done)
         * and lookup the page from the page hash locking it exclusively.
         */
        page_t *pPage = ppPages[iPage];
        u_offset_t offPage = pPage->p_offset;
        int rc = page_tryupgrade(pPage);
        if (!rc)
        {
            page_unlock(pPage);
            page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
            AssertRelease(pFoundPage);

            if (g_fLargePageNoReloc)
            {
                /*
                 * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
                 */
                AssertReleaseMsg(pFoundPage == pPage,
                                 ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
                                  pFoundPage, pPage));
            }

            /*
             * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
             * could possibly demote the large page to _4K pages between our call to page_unlock() and page_lookup().
             */
            if (page_get_pagecnt(pFoundPage->p_szc) == 1) /* Base size of only _4K associated with this page. */
                fDemoted = true;
            pPage = pFoundPage;
            ppPages[iPage] = pFoundPage;
        }
        Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
        /* Undo the long-term lock taken during allocation. */
        page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    }

    if (fDemoted)
    {
        /* The large page was broken up into base pages; destroy them one by one. */
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
            page_destroy(ppPages[iPage], 0 /* move it to the free list */);
        }
    }
    else
    {
        /*
         * Although we shred the adjacent pages in the linked list, page_destroy_pages works on
         * adjacent pages via array increments. So this does indeed free all the pages.
         */
        AssertPtr(pPageList);
        page_destroy_pages(pPageList);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}
485
486
487/**
488 * Unmaps kernel/user-space mapped memory.
489 *
490 * @param pv Pointer to the mapped memory block.
491 * @param cb Size of the memory block.
492 */
493static void rtR0MemObjSolUnmap(void *pv, size_t cb)
494{
495 if (SOL_IS_KRNL_ADDR(pv))
496 {
497 hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
498 vmem_free(heap_arena, pv, cb);
499 }
500 else
501 {
502 struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
503 AssertPtr(pAddrSpace);
504 as_rangelock(pAddrSpace);
505 as_unmap(pAddrSpace, pv, cb);
506 as_rangeunlock(pAddrSpace);
507 }
508}
509
510
511/**
512 * Lock down memory mappings for a virtual address.
513 *
514 * @param pv Pointer to the memory to lock down.
515 * @param cb Size of the memory block.
516 * @param fAccess Page access rights (S_READ, S_WRITE, S_EXEC)
517 *
518 * @returns IPRT status code.
519 */
520static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
521{
522 /*
523 * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
524 */
525 if (!SOL_IS_KRNL_ADDR(pv))
526 {
527 proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
528 AssertPtr(pProc);
529 faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
530 if (rc)
531 {
532 LogRel(("rtR0MemObjSolLock failed for pv=%pv cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
533 return VERR_LOCK_FAILED;
534 }
535 }
536 return VINF_SUCCESS;
537}
538
539
540/**
541 * Unlock memory mappings for a virtual address.
542 *
543 * @param pv Pointer to the locked memory.
544 * @param cb Size of the memory block.
545 * @param fPageAccess Page access rights (S_READ, S_WRITE, S_EXEC).
546 */
547static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
548{
549 if (!SOL_IS_KRNL_ADDR(pv))
550 {
551 proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
552 AssertPtr(pProcess);
553 as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
554 }
555}
556
557
/**
 * Maps a list of physical pages into user address space.
 *
 * @param pVirtAddr   Where to store the virtual address of the mapping.
 * @param fPageAccess Page access rights (PROT_READ, PROT_WRITE,
 *                    PROT_EXEC)
 * @param paPhysAddrs Array of physical addresses to pages.
 * @param cb          Size of memory being mapped.
 * @param cbPageSize  Size of each page in @a paPhysAddrs.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
{
    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
    int rc;
    SEGVBOX_CRARGS Args;

    Args.paPhysAddrs = paPhysAddrs;
    Args.fPageAccess = fPageAccess;
    Args.cbPageSize = cbPageSize;

    as_rangelock(pAddrSpace);
    /* Pick an address via the kernel's map-address hook; the signature changed
       between Solaris releases, hence the two variants. */
    if (g_frtSolOldMapAddr)
        g_rtSolMapAddr.u.pfnSol_map_addr_old(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
    else
        g_rtSolMapAddr.u.pfnSol_map_addr(pVirtAddr, cb, 0 /* offset */, MAP_SHARED);
    if (*pVirtAddr != NULL)
        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
    else
        rc = ENOMEM;
    as_rangeunlock(pAddrSpace);

    return RTErrConvertFromErrno(rc);
}
592
593
/**
 * Frees a native memory object, dispatching on the object type to undo
 * whatever the corresponding allocation/lock/map routine did.
 *
 * @param pMem The memory object to free.
 *
 * @returns IPRT status code.
 */
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PHYS:
            /* Only free if we actually allocated it (cf. rtR0MemObjNativeEnterPhys). */
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                if (pMemSolaris->fLargePage)
                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
                else
                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            /* fIndivPages decides between the page-array and contig_alloc backends. */
            if (pMemSolaris->fIndivPages)
                rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
            else
                rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            ddi_umem_free(pMemSolaris->Cookie);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            else
                AssertFailed();
            break;
        }

        case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
        default:
            AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
650
651
652DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
653{
654 /* Create the object. */
655 PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
656 if (RT_UNLIKELY(!pMemSolaris))
657 return VERR_NO_MEMORY;
658
659 void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
660 if (RT_UNLIKELY(!pvMem))
661 {
662 rtR0MemObjDelete(&pMemSolaris->Core);
663 return VERR_NO_PAGE_MEMORY;
664 }
665
666 pMemSolaris->Core.pv = pvMem;
667 pMemSolaris->pvHandle = NULL;
668 *ppMem = &pMemSolaris->Core;
669 return VINF_SUCCESS;
670}
671
672
/**
 * Allocates memory backed by large pages.
 *
 * No Solaris-specific fast path is implemented here; defer to the generic
 * fallback, which builds the allocation out of regular page allocations.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                           const char *pszTag)
{
    return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
}
678
679
/**
 * Allocates page-aligned kernel memory below 4GB (RTR0MEMOBJTYPE_LOW).
 *
 * @param ppMem       Where to store the resulting memory object.
 * @param cb          Number of bytes to allocate.
 * @param fExecutable Ignored on this platform.
 *
 * @returns IPRT status code.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* Create the object */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Allocate physically low page-aligned memory. */
    uint64_t uPhysHi = _4G - 1;
    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_LOW_MEMORY;
    }
    pMemSolaris->Core.pv = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}
702
703
/**
 * Allocates physically contiguous memory below 4GB.
 *
 * Implemented as a physical allocation with a 4GB ceiling and page alignment.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);
    return rtR0MemObjNativeAllocPhys(ppMem, cb, _4G - 1, PAGE_SIZE /* alignment */);
}
709
710
711DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
712{
713#if HC_ARCH_BITS == 64
714 PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
715 if (RT_UNLIKELY(!pMemSolaris))
716 return VERR_NO_MEMORY;
717
718 if (PhysHighest == NIL_RTHCPHYS)
719 {
720 uint64_t PhysAddr = UINT64_MAX;
721 void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
722 if (!pvPages)
723 {
724 LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
725 rtR0MemObjDelete(&pMemSolaris->Core);
726 return VERR_NO_MEMORY;
727 }
728 Assert(PhysAddr != UINT64_MAX);
729 Assert(!(PhysAddr & PAGE_OFFSET_MASK));
730
731 pMemSolaris->Core.pv = NULL;
732 pMemSolaris->pvHandle = pvPages;
733 pMemSolaris->fIndivPages = true;
734 *ppMem = &pMemSolaris->Core;
735 return VINF_SUCCESS;
736 }
737 else
738 {
739 /*
740 * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
741 * We fall back to using contig_alloc().
742 */
743 uint64_t PhysAddr = UINT64_MAX;
744 void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
745 if (!pvMem)
746 {
747 LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
748 rtR0MemObjDelete(&pMemSolaris->Core);
749 return VERR_NO_MEMORY;
750 }
751 Assert(PhysAddr != UINT64_MAX);
752 Assert(!(PhysAddr & PAGE_OFFSET_MASK));
753
754 pMemSolaris->Core.pv = pvMem;
755 pMemSolaris->pvHandle = NULL;
756 pMemSolaris->fIndivPages = false;
757 *ppMem = &pMemSolaris->Core;
758 return VINF_SUCCESS;
759 }
760
761#else /* 32 bit: */
762 return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
763#endif
764}
765
766
767DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
768{
769 AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
770
771 PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
772 if (RT_UNLIKELY(!pMemSolaris))
773 return VERR_NO_MEMORY;
774
775 /*
776 * Allocating one large page gets special treatment.
777 */
778 static uint32_t s_cbLargePage = UINT32_MAX;
779 if (s_cbLargePage == UINT32_MAX)
780 {
781 if (page_num_pagesizes() > 1)
782 ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
783 else
784 ASMAtomicWriteU32(&s_cbLargePage, 0);
785 }
786
787 uint64_t PhysAddr;
788 if ( cb == s_cbLargePage
789 && cb == uAlignment
790 && PhysHighest == NIL_RTHCPHYS)
791 {
792 /*
793 * Allocate one large page (backed by physically contiguous memory).
794 */
795 void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
796 if (RT_LIKELY(pvPages))
797 {
798 AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
799 pMemSolaris->Core.pv = NULL;
800 pMemSolaris->Core.u.Phys.PhysBase = PhysAddr;
801 pMemSolaris->Core.u.Phys.fAllocated = true;
802 pMemSolaris->pvHandle = pvPages;
803 pMemSolaris->fLargePage = true;
804
805 *ppMem = &pMemSolaris->Core;
806 return VINF_SUCCESS;
807 }
808 }
809 else
810 {
811 /*
812 * Allocate physically contiguous memory aligned as specified.
813 */
814 AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
815 PhysAddr = PhysHighest;
816 void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
817 if (RT_LIKELY(pvMem))
818 {
819 Assert(!(PhysAddr & PAGE_OFFSET_MASK));
820 Assert(PhysAddr < PhysHighest);
821 Assert(PhysAddr + cb <= PhysHighest);
822
823 pMemSolaris->Core.pv = pvMem;
824 pMemSolaris->Core.u.Phys.PhysBase = PhysAddr;
825 pMemSolaris->Core.u.Phys.fAllocated = true;
826 pMemSolaris->pvHandle = NULL;
827 pMemSolaris->fLargePage = false;
828
829 *ppMem = &pMemSolaris->Core;
830 return VINF_SUCCESS;
831 }
832 }
833 rtR0MemObjDelete(&pMemSolaris->Core);
834 return VERR_NO_CONT_MEMORY;
835}
836
837
/**
 * Creates a memory object for a pre-existing physical address range
 * (no allocation takes place).
 *
 * @param ppMem        Where to store the resulting memory object.
 * @param Phys         Base physical address of the range.
 * @param cb           Size of the range.
 * @param uCachePolicy Must be RTMEM_CACHE_POLICY_DONT_CARE; other policies
 *                     are not supported here.
 *
 * @returns IPRT status code.
 */
DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* There is no allocation here, it needs to be mapped somewhere first. */
    pMemSolaris->Core.u.Phys.fAllocated = false;
    pMemSolaris->Core.u.Phys.PhysBase = Phys;
    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}
854
855
856DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
857 RTR0PROCESS R0Process)
858{
859 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
860 NOREF(fAccess);
861
862 /* Create the locking object */
863 PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
864 if (!pMemSolaris)
865 return VERR_NO_MEMORY;
866
867 /* Lock down user pages. */
868 int fPageAccess = S_READ;
869 if (fAccess & RTMEM_PROT_WRITE)
870 fPageAccess = S_WRITE;
871 if (fAccess & RTMEM_PROT_EXEC)
872 fPageAccess = S_EXEC;
873 int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
874 if (RT_FAILURE(rc))
875 {
876 LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
877 rtR0MemObjDelete(&pMemSolaris->Core);
878 return rc;
879 }
880
881 /* Fill in the object attributes and return successfully. */
882 pMemSolaris->Core.u.Lock.R0Process = R0Process;
883 pMemSolaris->pvHandle = NULL;
884 pMemSolaris->fAccess = fPageAccess;
885 *ppMem = &pMemSolaris->Core;
886 return VINF_SUCCESS;
887}
888
889
890DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
891{
892 NOREF(fAccess);
893
894 PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
895 if (!pMemSolaris)
896 return VERR_NO_MEMORY;
897
898 /* Lock down kernel pages. */
899 int fPageAccess = S_READ;
900 if (fAccess & RTMEM_PROT_WRITE)
901 fPageAccess = S_WRITE;
902 if (fAccess & RTMEM_PROT_EXEC)
903 fPageAccess = S_EXEC;
904 int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
905 if (RT_FAILURE(rc))
906 {
907 LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
908 rtR0MemObjDelete(&pMemSolaris->Core);
909 return rc;
910 }
911
912 /* Fill in the object attributes and return successfully. */
913 pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
914 pMemSolaris->pvHandle = NULL;
915 pMemSolaris->fAccess = fPageAccess;
916 *ppMem = &pMemSolaris->Core;
917 return VINF_SUCCESS;
918}
919
920
921DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
922{
923 PRTR0MEMOBJSOL pMemSolaris;
924
925 /*
926 * Use xalloc.
927 */
928 void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
929 NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
930 if (RT_UNLIKELY(!pv))
931 return VERR_NO_MEMORY;
932
933 /* Create the object. */
934 pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
935 if (!pMemSolaris)
936 {
937 LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
938 vmem_xfree(heap_arena, pv, cb);
939 return VERR_NO_MEMORY;
940 }
941
942 pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
943 *ppMem = &pMemSolaris->Core;
944 return VINF_SUCCESS;
945}
946
947
/**
 * Reserves user virtual address space -- not implemented on Solaris.
 *
 * @returns VERR_NOT_SUPPORTED.
 */
DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process)
{
    return VERR_NOT_SUPPORTED;
}
953
954
/**
 * Maps (a sub-range of) an existing memory object into kernel address space
 * (RTR0MEMOBJTYPE_MAPPING).
 *
 * @param ppMem      Where to store the resulting mapping object.
 * @param pMemToMap  The object whose pages are to be mapped.
 * @param pvFixed    Must be (void *)-1; fixed addresses are not supported.
 * @param uAlignment Must be <= PAGE_SIZE.
 * @param fProt      RTMEM_PROT_* protection for the mapping.
 * @param offSub     Byte offset into @a pMemToMap at which to start.
 * @param cbSub      Number of bytes to map, 0 for the whole object.
 *
 * @returns IPRT status code.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    /* Fail if requested to do something we can't. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Use xalloc to get address space.
     */
    if (!cbSub)
        cbSub = pMemToMap->cb;
    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_MAP_FAILED;

    /*
     * Load the pages from the other object into it.
     */
    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
    if (fProt & RTMEM_PROT_READ)
        fAttr |= PROT_READ;
    if (fProt & RTMEM_PROT_EXEC)
        fAttr |= PROT_EXEC;
    if (fProt & RTMEM_PROT_WRITE)
        fAttr |= PROT_WRITE;
    fAttr |= HAT_NOSYNC;

    /* Load each page's translation into the kernel HAT, locked (HAT_LOAD_LOCK). */
    int rc = VINF_SUCCESS;
    size_t off = 0;
    while (off < cbSub)
    {
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
        pfn_t pfn = HCPhys >> PAGE_SHIFT;
        AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);

        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);

        /* Advance. */
        off += PAGE_SIZE;
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Create a memory object for the mapping.
         */
        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cbSub);
        if (pMemSolaris)
        {
            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }

        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
        rc = VERR_NO_MEMORY;
    }

    /* Failure: unload whatever translations were established, free the range. */
    if (off)
        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
    vmem_xfree(heap_arena, pv, cbSub);
    return rc;
}
1021
1022
1023DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
1024 size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
1025{
1026 /*
1027 * Fend off things we cannot do.
1028 */
1029 AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
1030 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
1031 if (uAlignment != PAGE_SIZE)
1032 return VERR_NOT_SUPPORTED;
1033
1034 /*
1035 * Get parameters from the source object and offSub/cbSub.
1036 */
1037 PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
1038 uint8_t *pb = pMemToMapSolaris->Core.pv ? (uint8_t *)pMemToMapSolaris->Core.pv + offSub : NULL;
1039 size_t const cb = cbSub ? cbSub : pMemToMapSolaris->Core.cb;
1040 size_t const cPages = cb >> PAGE_SHIFT;
1041 Assert(!offSub || cbSub);
1042 Assert(!(cb & PAGE_OFFSET_MASK));
1043
1044 /*
1045 * Create the mapping object
1046 */
1047 PRTR0MEMOBJSOL pMemSolaris;
1048 pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pb, cb);
1049 if (RT_UNLIKELY(!pMemSolaris))
1050 return VERR_NO_MEMORY;
1051
1052 /*
1053 * Gather the physical page address of the pages to be mapped.
1054 */
1055 int rc = VINF_SUCCESS;
1056 uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
1057 if (RT_LIKELY(paPhysAddrs))
1058 {
1059 if ( pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
1060 && pMemToMapSolaris->fIndivPages)
1061 {
1062 /* Translate individual page_t to physical addresses. */
1063 page_t **papPages = pMemToMapSolaris->pvHandle;
1064 AssertPtr(papPages);
1065 papPages += offSub >> PAGE_SHIFT;
1066 for (size_t iPage = 0; iPage < cPages; iPage++)
1067 paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(papPages[iPage]);
1068 }
1069 else if ( pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
1070 && pMemToMapSolaris->fLargePage)
1071 {
1072 /* Split up the large page into page-sized chunks. */
1073 RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
1074 Phys += offSub;
1075 for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
1076 paPhysAddrs[iPage] = Phys;
1077 }
1078 else
1079 {
1080 /* Have kernel mapping, just translate virtual to physical. */
1081 AssertPtr(pb);
1082 for (size_t iPage = 0; iPage < cPages; iPage++)
1083 {
1084 paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pb);
1085 if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
1086 {
1087 LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
1088 rc = VERR_MAP_FAILED;
1089 break;
1090 }
1091 pb += PAGE_SIZE;
1092 }
1093 }
1094 if (RT_SUCCESS(rc))
1095 {
1096 /*
1097 * Perform the actual mapping.
1098 */
1099 unsigned fPageAccess = PROT_READ;
1100 if (fProt & RTMEM_PROT_WRITE)
1101 fPageAccess |= PROT_WRITE;
1102 if (fProt & RTMEM_PROT_EXEC)
1103 fPageAccess |= PROT_EXEC;
1104
1105 caddr_t UserAddr = NULL;
1106 rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
1107 if (RT_SUCCESS(rc))
1108 {
1109 pMemSolaris->Core.u.Mapping.R0Process = R0Process;
1110 pMemSolaris->Core.pv = UserAddr;
1111
1112 *ppMem = &pMemSolaris->Core;
1113 kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
1114 return VINF_SUCCESS;
1115 }
1116
1117 LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
1118 }
1119
1120 rc = VERR_MAP_FAILED;
1121 kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
1122 }
1123 else
1124 rc = VERR_NO_MEMORY;
1125 rtR0MemObjDelete(&pMemSolaris->Core);
1126 return rc;
1127}
1128
1129
1130DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1131{
1132 NOREF(pMem);
1133 NOREF(offSub);
1134 NOREF(cbSub);
1135 NOREF(fProt);
1136 return VERR_NOT_SUPPORTED;
1137}
1138
1139
/**
 * Returns the host-physical address of the given page of a memory object,
 * or NIL_RTHCPHYS for object types where this is not applicable/handled here.
 */
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
            /* If there is a contiguous kernel mapping (or no individual page_t
               array), translate via the virtual address; otherwise look the
               page up in the page_t array kept in pvHandle. */
            if (   pMemSolaris->Core.u.Phys.fAllocated
                || !pMemSolaris->fIndivPages)
            {
                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
                return rtR0MemObjSolVirtToPhys(pb);
            }
            page_t **ppPages = pMemSolaris->pvHandle;
            return rtR0MemObjSolPagePhys(ppPages[iPage]);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        {
            /* These always have a kernel virtual mapping to translate from. */
            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return rtR0MemObjSolVirtToPhys(pb);
        }

        /*
         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
         */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
            AssertFailed(); /* handled by the caller */
            /* fall thru -- returns NIL_RTHCPHYS in release builds. */
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}
1179
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette