VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp@ 78382

Last change on this file since 78382 was 78120, checked in by vboxsync, 6 years ago

IPRT: Started adding a RTR0MemObjMapUserEx function that takes offSub and cbSub. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 20.9 KB
Line 
1/* $Id: memobj-r0drv-os2.cpp 78120 2019-04-12 13:20:50Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, OS/2.
4 */
5
6/*
7 * Contributed by knut st. osmundsen.
8 *
9 * Copyright (C) 2007-2019 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * The contents of this file may alternatively be used under the terms
20 * of the Common Development and Distribution License Version 1.0
21 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
22 * VirtualBox OSE distribution, in which case the provisions of the
23 * CDDL are applicable instead of those of the GPL.
24 *
25 * You may elect to license modified versions of this file under the
26 * terms and conditions of either the GPL or the CDDL or both.
27 *
28 * --------------------------------------------------------------------
29 *
30 * This code is based on:
31 *
32 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
33 *
34 * Permission is hereby granted, free of charge, to any person
35 * obtaining a copy of this software and associated documentation
36 * files (the "Software"), to deal in the Software without
37 * restriction, including without limitation the rights to use,
38 * copy, modify, merge, publish, distribute, sublicense, and/or sell
39 * copies of the Software, and to permit persons to whom the
40 * Software is furnished to do so, subject to the following
41 * conditions:
42 *
43 * The above copyright notice and this permission notice shall be
44 * included in all copies or substantial portions of the Software.
45 *
46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
47 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
48 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
49 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
50 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
51 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
52 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
53 * OTHER DEALINGS IN THE SOFTWARE.
54 */
55
56
57/*********************************************************************************************************************************
58* Header Files *
59*********************************************************************************************************************************/
60#include "the-os2-kernel.h"
61
62#include <iprt/memobj.h>
63#include <iprt/mem.h>
64#include <iprt/err.h>
65#include <iprt/assert.h>
66#include <iprt/log.h>
67#include <iprt/param.h>
68#include <iprt/process.h>
69#include "internal/memobj.h"
70
71
72/*********************************************************************************************************************************
73* Structures and Typedefs *
74*********************************************************************************************************************************/
75/**
76 * The OS/2 version of the memory object structure.
77 */
78typedef struct RTR0MEMOBJDARWIN
79{
80 /** The core structure. */
81 RTR0MEMOBJINTERNAL Core;
82 /** Lock for the ring-3 / ring-0 pinned objectes.
83 * This member might not be allocated for some object types. */
84 KernVMLock_t Lock;
85 /** Array of physical pages.
86 * This array can be 0 in length for some object types. */
87 KernPageList_t aPages[1];
88} RTR0MEMOBJOS2, *PRTR0MEMOBJOS2;
89
90
91/*********************************************************************************************************************************
92* Internal Functions *
93*********************************************************************************************************************************/
94static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet);
95
96
97DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
98{
99 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
100 int rc;
101
102 switch (pMemOs2->Core.enmType)
103 {
104 case RTR0MEMOBJTYPE_PHYS_NC:
105 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
106 return VERR_INTERNAL_ERROR;
107
108 case RTR0MEMOBJTYPE_PHYS:
109 if (!pMemOs2->Core.pv)
110 break;
111
112 case RTR0MEMOBJTYPE_MAPPING:
113 if (pMemOs2->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
114 break;
115
116 RT_FALL_THRU();
117 case RTR0MEMOBJTYPE_PAGE:
118 case RTR0MEMOBJTYPE_LOW:
119 case RTR0MEMOBJTYPE_CONT:
120 rc = KernVMFree(pMemOs2->Core.pv);
121 AssertMsg(!rc, ("rc=%d type=%d pv=%p cb=%#zx\n", rc, pMemOs2->Core.enmType, pMemOs2->Core.pv, pMemOs2->Core.cb));
122 break;
123
124 case RTR0MEMOBJTYPE_LOCK:
125 rc = KernVMUnlock(&pMemOs2->Lock);
126 AssertMsg(!rc, ("rc=%d\n", rc));
127 break;
128
129 case RTR0MEMOBJTYPE_RES_VIRT:
130 default:
131 AssertMsgFailed(("enmType=%d\n", pMemOs2->Core.enmType));
132 return VERR_INTERNAL_ERROR;
133 }
134
135 return VINF_SUCCESS;
136}
137
138
139DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
140{
141 NOREF(fExecutable);
142
143 /* create the object. */
144 const ULONG cPages = cb >> PAGE_SHIFT;
145 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
146 RTR0MEMOBJTYPE_PAGE, NULL, cb);
147 if (!pMemOs2)
148 return VERR_NO_MEMORY;
149
150 /* do the allocation. */
151 int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
152 if (!rc)
153 {
154 ULONG cPagesRet = cPages;
155 rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
156 if (!rc)
157 {
158 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
159 *ppMem = &pMemOs2->Core;
160 return VINF_SUCCESS;
161 }
162 KernVMFree(pMemOs2->Core.pv);
163 }
164 rtR0MemObjDelete(&pMemOs2->Core);
165 return RTErrConvertFromOS2(rc);
166}
167
168
169DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
170{
171 NOREF(fExecutable);
172
173 /* create the object. */
174 const ULONG cPages = cb >> PAGE_SHIFT;
175 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
176 RTR0MEMOBJTYPE_LOW, NULL, cb);
177 if (!pMemOs2)
178 return VERR_NO_MEMORY;
179
180 /* do the allocation. */
181 int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
182 if (!rc)
183 {
184 ULONG cPagesRet = cPages;
185 rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
186 if (!rc)
187 {
188 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
189 *ppMem = &pMemOs2->Core;
190 return VINF_SUCCESS;
191 }
192 KernVMFree(pMemOs2->Core.pv);
193 }
194 rtR0MemObjDelete(&pMemOs2->Core);
195 rc = RTErrConvertFromOS2(rc);
196 return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
197}
198
199
200DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
201{
202 NOREF(fExecutable);
203
204 /* create the object. */
205 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT, NULL, cb);
206 if (!pMemOs2)
207 return VERR_NO_MEMORY;
208
209 /* do the allocation. */
210 ULONG ulPhys = ~0UL;
211 int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
212 if (!rc)
213 {
214 Assert(ulPhys != ~0UL);
215 pMemOs2->Core.u.Cont.Phys = ulPhys;
216 *ppMem = &pMemOs2->Core;
217 return VINF_SUCCESS;
218 }
219 rtR0MemObjDelete(&pMemOs2->Core);
220 return RTErrConvertFromOS2(rc);
221}
222
223
224DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
225{
226 AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
227
228 /** @todo alignment */
229 if (uAlignment != PAGE_SIZE)
230 return VERR_NOT_SUPPORTED;
231
232 /* create the object. */
233 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
234 if (!pMemOs2)
235 return VERR_NO_MEMORY;
236
237 /* do the allocation. */
238 ULONG ulPhys = ~0UL;
239 int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
240 if (!rc)
241 {
242 Assert(ulPhys != ~0UL);
243 pMemOs2->Core.u.Phys.fAllocated = true;
244 pMemOs2->Core.u.Phys.PhysBase = ulPhys;
245 *ppMem = &pMemOs2->Core;
246 return VINF_SUCCESS;
247 }
248 rtR0MemObjDelete(&pMemOs2->Core);
249 return RTErrConvertFromOS2(rc);
250}
251
252
/* Non-contiguous physical allocation is not implemented natively; fall back
   to the contiguous allocator, which satisfies the same constraints. */
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhys / darwin. */
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE);
}
258
259
260DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
261{
262 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
263
264 /* create the object. */
265 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
266 if (!pMemOs2)
267 return VERR_NO_MEMORY;
268
269 /* there is no allocation here, right? it needs to be mapped somewhere first. */
270 pMemOs2->Core.u.Phys.fAllocated = false;
271 pMemOs2->Core.u.Phys.PhysBase = Phys;
272 pMemOs2->Core.u.Phys.uCachePolicy = uCachePolicy;
273 *ppMem = &pMemOs2->Core;
274 return VINF_SUCCESS;
275}
276
277
278DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
279 RTR0PROCESS R0Process)
280{
281 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
282
283 /* create the object. */
284 const ULONG cPages = cb >> PAGE_SHIFT;
285 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
286 RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
287 if (!pMemOs2)
288 return VERR_NO_MEMORY;
289
290 /* lock it. */
291 ULONG cPagesRet = cPages;
292 int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
293 (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
294 if (!rc)
295 {
296 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
297 Assert(cb == pMemOs2->Core.cb);
298 Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
299 pMemOs2->Core.u.Lock.R0Process = R0Process;
300 *ppMem = &pMemOs2->Core;
301 return VINF_SUCCESS;
302 }
303 rtR0MemObjDelete(&pMemOs2->Core);
304 return RTErrConvertFromOS2(rc);
305}
306
307
308DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
309{
310 /* create the object. */
311 const ULONG cPages = cb >> PAGE_SHIFT;
312 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
313 RTR0MEMOBJTYPE_LOCK, pv, cb);
314 if (!pMemOs2)
315 return VERR_NO_MEMORY;
316
317 /* lock it. */
318 ULONG cPagesRet = cPages;
319 int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
320 pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
321 if (!rc)
322 {
323 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
324 pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
325 *ppMem = &pMemOs2->Core;
326 return VINF_SUCCESS;
327 }
328 rtR0MemObjDelete(&pMemOs2->Core);
329 return RTErrConvertFromOS2(rc);
330}
331
332
/* Reserving kernel virtual address space is not supported by this backend. */
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    RT_NOREF(ppMem, pvFixed, cb, uAlignment);
    return VERR_NOT_SUPPORTED;
}
338
339
/* Reserving user virtual address space is not supported by this backend. */
DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process)
{
    RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
    return VERR_NOT_SUPPORTED;
}
346
347
348DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
349 unsigned fProt, size_t offSub, size_t cbSub)
350{
351 AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
352 AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
353
354 /*
355 * Check that the specified alignment is supported.
356 */
357 if (uAlignment > PAGE_SIZE)
358 return VERR_NOT_SUPPORTED;
359
360
361/** @todo finish the implementation. */
362
363 int rc;
364 void *pvR0 = NULL;
365 PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
366 switch (pMemToMapOs2->Core.enmType)
367 {
368 /*
369 * These has kernel mappings.
370 */
371 case RTR0MEMOBJTYPE_PAGE:
372 case RTR0MEMOBJTYPE_LOW:
373 case RTR0MEMOBJTYPE_CONT:
374 pvR0 = pMemToMapOs2->Core.pv;
375 break;
376
377 case RTR0MEMOBJTYPE_PHYS:
378 pvR0 = pMemToMapOs2->Core.pv;
379 if (!pvR0)
380 {
381 /* no ring-0 mapping, so allocate a mapping in the process. */
382 AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
383 Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
384 ULONG ulPhys = (ULONG)pMemToMapOs2->Core.u.Phys.PhysBase;
385 AssertReturn(ulPhys == pMemToMapOs2->Core.u.Phys.PhysBase, VERR_OUT_OF_RANGE);
386 rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
387 if (rc)
388 return RTErrConvertFromOS2(rc);
389 pMemToMapOs2->Core.pv = pvR0;
390 }
391 break;
392
393 case RTR0MEMOBJTYPE_PHYS_NC:
394 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
395 return VERR_INTERNAL_ERROR_3;
396
397 case RTR0MEMOBJTYPE_LOCK:
398 if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
399 return VERR_NOT_SUPPORTED; /** @todo implement this... */
400 pvR0 = pMemToMapOs2->Core.pv;
401 break;
402
403 case RTR0MEMOBJTYPE_RES_VIRT:
404 case RTR0MEMOBJTYPE_MAPPING:
405 default:
406 AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
407 return VERR_INTERNAL_ERROR;
408 }
409
410 /*
411 * Create a dummy mapping object for it.
412 *
413 * All mappings are read/write/execute in OS/2 and there isn't
414 * any cache options, so sharing is ok. And the main memory object
415 * isn't actually freed until all the mappings have been freed up
416 * (reference counting).
417 */
418 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
419 pvR0, pMemToMapOs2->Core.cb);
420 if (pMemOs2)
421 {
422 pMemOs2->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
423 *ppMem = &pMemOs2->Core;
424 return VINF_SUCCESS;
425 }
426 return VERR_NO_MEMORY;
427}
428
429
/**
 * Maps a memory object into the calling process.
 *
 * Only mapping into the current process at a system-chosen address is
 * supported; sub-range mapping is not implemented yet.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
    AssertMsgReturn(!offSub && !cbSub, ("%#zx %#zx\n", offSub, cbSub), VERR_NOT_SUPPORTED); /** @todo implement sub maps */

    /*
     * Find the ring-0 address of the object to map; the actual user mapping
     * is established afterwards via RTR0Os2DHVMGlobalToProcess.
     */
    int rc;
    void *pvR0;
    void *pvR3 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings already.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            /* Mapping raw physical ranges into a process is not supported;
               the disabled code below was an earlier, incorrect attempt. */
            pvR0 = pMemToMapOs2->Core.pv;
#if 0/* this is wrong. */
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
            }
            break;
#endif
            return VERR_NOT_SUPPORTED;

        case RTR0MEMOBJTYPE_PHYS_NC:
            /* Never created by this backend. */
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_5;

        case RTR0MEMOBJTYPE_LOCK:
            /* Only kernel-side locks can be re-mapped into a process. */
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Map the ring-0 memory into the current process.
     */
    if (!pvR3)
    {
        Assert(pvR0);
        ULONG flFlags = 0;
        /* NOTE(review): VMDHGP_4MB is set for PAGE_SIZE alignment, which looks
           inverted for a "4MB" flag — confirm against the DevHlp docs. */
        if (uAlignment == PAGE_SIZE)
            flFlags |= VMDHGP_4MB;
        if (fProt & RTMEM_PROT_WRITE)
            flFlags |= VMDHGP_WRITE;
        rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
        if (rc)
            return RTErrConvertFromOS2(rc);
    }
    Assert(pvR3);

    /*
     * Create a mapping object for it.
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
                                                           pvR3, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        Assert(pMemOs2->Core.pv == pvR3);
        pMemOs2->Core.u.Mapping.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    /* Undo the process mapping on allocation failure. */
    KernVMFree(pvR3);
    return VERR_NO_MEMORY;
}
520
521
/* Changing page protection after the fact is not supported by this backend. */
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}
530
531
532DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
533{
534 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
535
536 switch (pMemOs2->Core.enmType)
537 {
538 case RTR0MEMOBJTYPE_PAGE:
539 case RTR0MEMOBJTYPE_LOW:
540 case RTR0MEMOBJTYPE_LOCK:
541 case RTR0MEMOBJTYPE_PHYS_NC:
542 return pMemOs2->aPages[iPage].Addr;
543
544 case RTR0MEMOBJTYPE_CONT:
545 return pMemOs2->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
546
547 case RTR0MEMOBJTYPE_PHYS:
548 return pMemOs2->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
549
550 case RTR0MEMOBJTYPE_RES_VIRT:
551 case RTR0MEMOBJTYPE_MAPPING:
552 default:
553 return NIL_RTHCPHYS;
554 }
555}
556
557
/**
 * Expands the page list so we can index pages directly.
 *
 * KernLinToPageList may return fewer entries than pages because it coalesces
 * physically contiguous runs into one entry with Size > PAGE_SIZE.  This
 * worker expands the list in place, walking backwards from the end so the
 * expanded output never overwrites input entries not yet consumed, until
 * every entry describes exactly one page.
 *
 * @param   paPages     The page list array to fix.
 * @param   cPages      The number of pages that's supposed to go into the list.
 * @param   cPagesRet   The actual number of pages in the list.
 */
static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet)
{
    Assert(cPages >= cPagesRet);
    /* Already one entry per page? Then nothing to do. */
    if (cPages != cPagesRet)
    {
        /* iIn scans the compact input backwards; iOut is the write cursor for
           the fully expanded output. */
        ULONG iIn = cPagesRet;
        ULONG iOut = cPages;
        do
        {
            iIn--;
            iOut--;
            /* The output cursor must never pass the input cursor, or we would
               clobber unread input entries. */
            Assert(iIn <= iOut);

            /* Copy the entry before expanding it, since the expansion may
               write over its slot. */
            KernPageList_t Page = paPages[iIn];
            Assert(!(Page.Addr & PAGE_OFFSET_MASK));
            Assert(Page.Size == RT_ALIGN_Z(Page.Size, PAGE_SIZE));

            /* A multi-page run: emit its pages from the highest address down,
               leaving the first page for the common write below. */
            if (Page.Size > PAGE_SIZE)
            {
                do
                {
                    Page.Size -= PAGE_SIZE;
                    paPages[iOut].Addr = Page.Addr + Page.Size;
                    paPages[iOut].Size = PAGE_SIZE;
                    iOut--;
                } while (Page.Size > PAGE_SIZE);
            }

            /* Emit the (remaining) first page of the run. */
            paPages[iOut].Addr = Page.Addr;
            paPages[iOut].Size = PAGE_SIZE;
        } while (   iIn != iOut
                 && iIn > 0);
    }
}
599
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette