VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp@ 74221

Last change on this file since 74221 was 73097, checked in by vboxsync, 6 years ago

*: Made RT_UOFFSETOF, RT_OFFSETOF, RT_UOFFSETOF_ADD and RT_OFFSETOF_ADD work like __builtin_offsetof() and require compile-time resolvable requests, adding RT_UOFFSETOF_DYN for the dynamic questions that can only be answered at runtime.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 19.5 KB
Line 
1/* $Id: memobj-r0drv-os2.cpp 73097 2018-07-12 21:06:33Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, OS/2.
4 */
5
6/*
7 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#include "the-os2-kernel.h"
36
37#include <iprt/memobj.h>
38#include <iprt/mem.h>
39#include <iprt/err.h>
40#include <iprt/assert.h>
41#include <iprt/log.h>
42#include <iprt/param.h>
43#include <iprt/process.h>
44#include "internal/memobj.h"
45
46
47/*********************************************************************************************************************************
48* Structures and Typedefs *
49*********************************************************************************************************************************/
50/**
51 * The OS/2 version of the memory object structure.
52 */
53typedef struct RTR0MEMOBJDARWIN
54{
55 /** The core structure. */
56 RTR0MEMOBJINTERNAL Core;
57 /** Lock for the ring-3 / ring-0 pinned objectes.
58 * This member might not be allocated for some object types. */
59 KernVMLock_t Lock;
60 /** Array of physical pages.
61 * This array can be 0 in length for some object types. */
62 KernPageList_t aPages[1];
63} RTR0MEMOBJOS2, *PRTR0MEMOBJOS2;
64
65
66/*********************************************************************************************************************************
67* Internal Functions *
68*********************************************************************************************************************************/
69static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet);
70
71
72DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
73{
74 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
75 int rc;
76
77 switch (pMemOs2->Core.enmType)
78 {
79 case RTR0MEMOBJTYPE_PHYS_NC:
80 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
81 return VERR_INTERNAL_ERROR;
82 break;
83
84 case RTR0MEMOBJTYPE_PHYS:
85 if (!pMemOs2->Core.pv)
86 break;
87
88 case RTR0MEMOBJTYPE_MAPPING:
89 if (pMemOs2->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
90 break;
91
92 RT_FALL_THRU();
93 case RTR0MEMOBJTYPE_PAGE:
94 case RTR0MEMOBJTYPE_LOW:
95 case RTR0MEMOBJTYPE_CONT:
96 rc = KernVMFree(pMemOs2->Core.pv);
97 AssertMsg(!rc, ("rc=%d type=%d pv=%p cb=%#zx\n", rc, pMemOs2->Core.enmType, pMemOs2->Core.pv, pMemOs2->Core.cb));
98 break;
99
100 case RTR0MEMOBJTYPE_LOCK:
101 rc = KernVMUnlock(&pMemOs2->Lock);
102 AssertMsg(!rc, ("rc=%d\n", rc));
103 break;
104
105 case RTR0MEMOBJTYPE_RES_VIRT:
106 default:
107 AssertMsgFailed(("enmType=%d\n", pMemOs2->Core.enmType));
108 return VERR_INTERNAL_ERROR;
109 }
110
111 return VINF_SUCCESS;
112}
113
114
115DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
116{
117 NOREF(fExecutable);
118
119 /* create the object. */
120 const ULONG cPages = cb >> PAGE_SHIFT;
121 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
122 RTR0MEMOBJTYPE_PAGE, NULL, cb);
123 if (!pMemOs2)
124 return VERR_NO_MEMORY;
125
126 /* do the allocation. */
127 int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
128 if (!rc)
129 {
130 ULONG cPagesRet = cPages;
131 rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
132 if (!rc)
133 {
134 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
135 *ppMem = &pMemOs2->Core;
136 return VINF_SUCCESS;
137 }
138 KernVMFree(pMemOs2->Core.pv);
139 }
140 rtR0MemObjDelete(&pMemOs2->Core);
141 return RTErrConvertFromOS2(rc);
142}
143
144
145DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
146{
147 NOREF(fExecutable);
148
149 /* create the object. */
150 const ULONG cPages = cb >> PAGE_SHIFT;
151 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
152 RTR0MEMOBJTYPE_LOW, NULL, cb);
153 if (!pMemOs2)
154 return VERR_NO_MEMORY;
155
156 /* do the allocation. */
157 int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
158 if (!rc)
159 {
160 ULONG cPagesRet = cPages;
161 rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
162 if (!rc)
163 {
164 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
165 *ppMem = &pMemOs2->Core;
166 return VINF_SUCCESS;
167 }
168 KernVMFree(pMemOs2->Core.pv);
169 }
170 rtR0MemObjDelete(&pMemOs2->Core);
171 rc = RTErrConvertFromOS2(rc);
172 return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
173}
174
175
176DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
177{
178 NOREF(fExecutable);
179
180 /* create the object. */
181 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT, NULL, cb);
182 if (!pMemOs2)
183 return VERR_NO_MEMORY;
184
185 /* do the allocation. */
186 ULONG ulPhys = ~0UL;
187 int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
188 if (!rc)
189 {
190 Assert(ulPhys != ~0UL);
191 pMemOs2->Core.u.Cont.Phys = ulPhys;
192 *ppMem = &pMemOs2->Core;
193 return VINF_SUCCESS;
194 }
195 rtR0MemObjDelete(&pMemOs2->Core);
196 return RTErrConvertFromOS2(rc);
197}
198
199
200DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
201{
202 AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
203
204 /** @todo alignment */
205 if (uAlignment != PAGE_SIZE)
206 return VERR_NOT_SUPPORTED;
207
208 /* create the object. */
209 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
210 if (!pMemOs2)
211 return VERR_NO_MEMORY;
212
213 /* do the allocation. */
214 ULONG ulPhys = ~0UL;
215 int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
216 if (!rc)
217 {
218 Assert(ulPhys != ~0UL);
219 pMemOs2->Core.u.Phys.fAllocated = true;
220 pMemOs2->Core.u.Phys.PhysBase = ulPhys;
221 *ppMem = &pMemOs2->Core;
222 return VINF_SUCCESS;
223 }
224 rtR0MemObjDelete(&pMemOs2->Core);
225 return RTErrConvertFromOS2(rc);
226}
227
228
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhys / darwin. */
    /* No separate non-contiguous implementation; delegate to the contiguous
       allocator, which trivially satisfies the same constraints. */
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE);
}
234
235
236DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
237{
238 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
239
240 /* create the object. */
241 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
242 if (!pMemOs2)
243 return VERR_NO_MEMORY;
244
245 /* there is no allocation here, right? it needs to be mapped somewhere first. */
246 pMemOs2->Core.u.Phys.fAllocated = false;
247 pMemOs2->Core.u.Phys.PhysBase = Phys;
248 pMemOs2->Core.u.Phys.uCachePolicy = uCachePolicy;
249 *ppMem = &pMemOs2->Core;
250 return VINF_SUCCESS;
251}
252
253
254DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
255 RTR0PROCESS R0Process)
256{
257 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
258
259 /* create the object. */
260 const ULONG cPages = cb >> PAGE_SHIFT;
261 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
262 RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
263 if (!pMemOs2)
264 return VERR_NO_MEMORY;
265
266 /* lock it. */
267 ULONG cPagesRet = cPages;
268 int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
269 (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
270 if (!rc)
271 {
272 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
273 Assert(cb == pMemOs2->Core.cb);
274 Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
275 pMemOs2->Core.u.Lock.R0Process = R0Process;
276 *ppMem = &pMemOs2->Core;
277 return VINF_SUCCESS;
278 }
279 rtR0MemObjDelete(&pMemOs2->Core);
280 return RTErrConvertFromOS2(rc);
281}
282
283
284DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
285{
286 /* create the object. */
287 const ULONG cPages = cb >> PAGE_SHIFT;
288 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJOS2, aPages[cPages]),
289 RTR0MEMOBJTYPE_LOCK, pv, cb);
290 if (!pMemOs2)
291 return VERR_NO_MEMORY;
292
293 /* lock it. */
294 ULONG cPagesRet = cPages;
295 int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
296 pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
297 if (!rc)
298 {
299 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
300 pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
301 *ppMem = &pMemOs2->Core;
302 return VINF_SUCCESS;
303 }
304 rtR0MemObjDelete(&pMemOs2->Core);
305 return RTErrConvertFromOS2(rc);
306}
307
308
309DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
310{
311 return VERR_NOT_SUPPORTED;
312}
313
314
315DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
316 RTR0PROCESS R0Process)
317{
318 return VERR_NOT_SUPPORTED;
319}
320
321
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    /* Sub-range mappings and caller-fixed addresses are not implemented. */
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;


/** @todo finish the implementation. */

    /*
     * Determine the ring-0 address of the object being mapped.
     */
    int rc;
    void *pvR0 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings already; just reuse them.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                /* NOTE(review): this lazily creates the ring-0 mapping and caches
                   it in the SOURCE object (Core.pv); rtR0MemObjNativeFree frees it
                   when the source object is destroyed. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
                pMemToMapOs2->Core.pv = pvR0;
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_3;
            break;

        case RTR0MEMOBJTYPE_LOCK:
            /* Only ring-0 locks can be aliased directly; locked user memory
               would need real mapping work. */
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Create a dummy mapping object for it.
     *
     * All mappings are read/write/execute in OS/2 and there aren't
     * any cache options, so sharing is ok. And the main memory object
     * isn't actually freed until all the mappings have been freed up
     * (reference counting).
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
                                                           pvR0, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        /* NIL process handle marks this as a ring-0 mapping. */
        pMemOs2->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
402
403
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /* Only mapping into the current process at a kernel-chosen address is implemented. */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Determine the ring-0 address of the object being mapped.
     */
    int rc;
    void *pvR0;
    void *pvR3 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings already; just reuse them.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            /* A PHYS object without a ring-0 mapping cannot be mapped into
               a process yet (the disabled code below was deemed wrong). */
            pvR0 = pMemToMapOs2->Core.pv;
#if 0/* this is wrong. */
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
            }
            break;
#endif
            return VERR_NOT_SUPPORTED;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_5;
            break;

        case RTR0MEMOBJTYPE_LOCK:
            /* Only ring-0 locks can be mapped; locked user memory would
               need real mapping work. */
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Map the ring-0 memory into the current process.
     */
    if (!pvR3)
    {
        Assert(pvR0);
        ULONG flFlags = 0;
        /* NOTE(review): VMDHGP_4MB is set for plain PAGE_SIZE alignment -
           looks inverted; confirm against the DevHlp_VMGlobalToProcess docs. */
        if (uAlignment == PAGE_SIZE)
            flFlags |= VMDHGP_4MB;
        if (fProt & RTMEM_PROT_WRITE)
            flFlags |= VMDHGP_WRITE;
        rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
        if (rc)
            return RTErrConvertFromOS2(rc);
    }
    Assert(pvR3);

    /*
     * Create a mapping object for it.
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_UOFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING,
                                                           pvR3, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        Assert(pMemOs2->Core.pv == pvR3);
        pMemOs2->Core.u.Mapping.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    /* Out of memory: undo the process mapping before failing. */
    KernVMFree(pvR3);
    return VERR_NO_MEMORY;
}
493
494
495DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
496{
497 NOREF(pMem);
498 NOREF(offSub);
499 NOREF(cbSub);
500 NOREF(fProt);
501 return VERR_NOT_SUPPORTED;
502}
503
504
505DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
506{
507 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
508
509 switch (pMemOs2->Core.enmType)
510 {
511 case RTR0MEMOBJTYPE_PAGE:
512 case RTR0MEMOBJTYPE_LOW:
513 case RTR0MEMOBJTYPE_LOCK:
514 case RTR0MEMOBJTYPE_PHYS_NC:
515 return pMemOs2->aPages[iPage].Addr;
516
517 case RTR0MEMOBJTYPE_CONT:
518 return pMemOs2->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
519
520 case RTR0MEMOBJTYPE_PHYS:
521 return pMemOs2->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
522
523 case RTR0MEMOBJTYPE_RES_VIRT:
524 case RTR0MEMOBJTYPE_MAPPING:
525 default:
526 return NIL_RTHCPHYS;
527 }
528}
529
530
/**
 * Expands the page list so we can index pages directly.
 *
 * KernVMLock / KernLinToPageList can return fewer entries than there are
 * pages, an entry covering several contiguous pages via a larger Size.
 * This expands such entries in place, back to front, so that entry i
 * describes exactly page i (every entry ends up with Size == PAGE_SIZE).
 *
 * @param   paPages     The page list array to fix.
 * @param   cPages      The number of pages that's supposed to go into the list.
 * @param   cPagesRet   The actual number of pages in the list.
 */
static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet)
{
    Assert(cPages >= cPagesRet);
    if (cPages != cPagesRet)
    {
        /* Walk both cursors from the end so the expansion never overwrites
           input entries that haven't been consumed yet. */
        ULONG iIn = cPagesRet;
        ULONG iOut = cPages;
        do
        {
            iIn--;
            iOut--;
            Assert(iIn <= iOut);

            /* Copy the entry before touching the slot - iIn may equal iOut. */
            KernPageList_t Page = paPages[iIn];
            Assert(!(Page.Addr & PAGE_OFFSET_MASK));
            Assert(Page.Size == RT_ALIGN_Z(Page.Size, PAGE_SIZE));

            /* Expand a multi-page run into single-page entries, highest
               address first. */
            if (Page.Size > PAGE_SIZE)
            {
                do
                {
                    Page.Size -= PAGE_SIZE;
                    paPages[iOut].Addr = Page.Addr + Page.Size;
                    paPages[iOut].Size = PAGE_SIZE;
                    iOut--;
                } while (Page.Size > PAGE_SIZE);
            }

            /* Finally the first (lowest) page of the run. */
            paPages[iOut].Addr = Page.Addr;
            paPages[iOut].Size = PAGE_SIZE;
        } while ( iIn != iOut
            && iIn > 0);
    }
}
572
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette