VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp@ 53517

Last change on this file since 53517 was 48935, checked in by vboxsync, 11 years ago

Runtime: Whitespace and svn:keyword cleanups by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 18.7 KB
Line 
1/* $Id: memobj-r0drv-os2.cpp 48935 2013-10-07 21:19:37Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, OS/2.
4 */
5
6/*
7 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include "the-os2-kernel.h"
36
37#include <iprt/memobj.h>
38#include <iprt/mem.h>
39#include <iprt/err.h>
40#include <iprt/assert.h>
41#include <iprt/log.h>
42#include <iprt/param.h>
43#include <iprt/process.h>
44#include "internal/memobj.h"
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
50/**
51 * The OS/2 version of the memory object structure.
52 */
53typedef struct RTR0MEMOBJDARWIN
54{
55 /** The core structure. */
56 RTR0MEMOBJINTERNAL Core;
57 /** Lock for the ring-3 / ring-0 pinned objectes.
58 * This member might not be allocated for some object types. */
59 KernVMLock_t Lock;
60 /** Array of physical pages.
61 * This array can be 0 in length for some object types. */
62 KernPageList_t aPages[1];
63} RTR0MEMOBJOS2, *PRTR0MEMOBJOS2;
64
65
66/*******************************************************************************
67* Internal Functions *
68*******************************************************************************/
69static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet);
70
71
72DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
73{
74 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
75 int rc;
76
77 switch (pMemOs2->Core.enmType)
78 {
79 case RTR0MEMOBJTYPE_PHYS_NC:
80 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
81 return VERR_INTERNAL_ERROR;
82 break;
83
84 case RTR0MEMOBJTYPE_PHYS:
85 if (!pMemOs2->Core.pv)
86 break;
87
88 case RTR0MEMOBJTYPE_MAPPING:
89 if (pMemOs2->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
90 break;
91
92 /* fall thru */
93 case RTR0MEMOBJTYPE_PAGE:
94 case RTR0MEMOBJTYPE_LOW:
95 case RTR0MEMOBJTYPE_CONT:
96 rc = KernVMFree(pMemOs2->Core.pv);
97 AssertMsg(!rc, ("rc=%d type=%d pv=%p cb=%#zx\n", rc, pMemOs2->Core.enmType, pMemOs2->Core.pv, pMemOs2->Core.cb));
98 break;
99
100 case RTR0MEMOBJTYPE_LOCK:
101 rc = KernVMUnlock(&pMemOs2->Lock);
102 AssertMsg(!rc, ("rc=%d\n", rc));
103 break;
104
105 case RTR0MEMOBJTYPE_RES_VIRT:
106 default:
107 AssertMsgFailed(("enmType=%d\n", pMemOs2->Core.enmType));
108 return VERR_INTERNAL_ERROR;
109 }
110
111 return VINF_SUCCESS;
112}
113
114
115DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
116{
117 NOREF(fExecutable);
118
119 /* create the object. */
120 const ULONG cPages = cb >> PAGE_SHIFT;
121 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_PAGE, NULL, cb);
122 if (!pMemOs2)
123 return VERR_NO_MEMORY;
124
125 /* do the allocation. */
126 int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
127 if (!rc)
128 {
129 ULONG cPagesRet = cPages;
130 rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
131 if (!rc)
132 {
133 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
134 *ppMem = &pMemOs2->Core;
135 return VINF_SUCCESS;
136 }
137 KernVMFree(pMemOs2->Core.pv);
138 }
139 rtR0MemObjDelete(&pMemOs2->Core);
140 return RTErrConvertFromOS2(rc);
141}
142
143
144DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
145{
146 NOREF(fExecutable);
147
148 /* create the object. */
149 const ULONG cPages = cb >> PAGE_SHIFT;
150 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOW, NULL, cb);
151 if (!pMemOs2)
152 return VERR_NO_MEMORY;
153
154 /* do the allocation. */
155 int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
156 if (!rc)
157 {
158 ULONG cPagesRet = cPages;
159 rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
160 if (!rc)
161 {
162 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
163 *ppMem = &pMemOs2->Core;
164 return VINF_SUCCESS;
165 }
166 KernVMFree(pMemOs2->Core.pv);
167 }
168 rtR0MemObjDelete(&pMemOs2->Core);
169 rc = RTErrConvertFromOS2(rc);
170 return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
171}
172
173
174DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
175{
176 NOREF(fExecutable);
177
178 /* create the object. */
179 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT, NULL, cb);
180 if (!pMemOs2)
181 return VERR_NO_MEMORY;
182
183 /* do the allocation. */
184 ULONG ulPhys = ~0UL;
185 int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
186 if (!rc)
187 {
188 Assert(ulPhys != ~0UL);
189 pMemOs2->Core.u.Cont.Phys = ulPhys;
190 *ppMem = &pMemOs2->Core;
191 return VINF_SUCCESS;
192 }
193 rtR0MemObjDelete(&pMemOs2->Core);
194 return RTErrConvertFromOS2(rc);
195}
196
197
/**
 * Allocates contiguous physical memory below a given address limit.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the new memory object on success.
 * @param   cb              Number of bytes to allocate (page aligned).
 * @param   PhysHighest     The highest permitted physical address; must be
 *                          at least 16MB (asserted below).
 * @param   uAlignment      Requested physical alignment; only PAGE_SIZE is
 *                          supported at present.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    /** @todo alignment */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation.
       The only below-limit control available is VMDHA_16M (allocate below
       16MB), so any limit under 4GB is satisfied by allocating below 16MB -
       stricter than requested for limits in the 16MB..4GB range, but safe. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc)
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Phys.fAllocated = true;
        pMemOs2->Core.u.Phys.PhysBase = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
225
226
/**
 * Allocates non-contiguous physical memory.
 *
 * Not implemented natively; delegates to rtR0MemObjNativeAllocPhys, which
 * returns a contiguous allocation (a valid superset of the request).
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the new memory object on success.
 * @param   cb              Number of bytes to allocate (page aligned).
 * @param   PhysHighest     The highest permitted physical address.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo implement a real non-contiguous allocation for OS/2. */
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE);
}
232
233
234DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
235{
236 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
237
238 /* create the object. */
239 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
240 if (!pMemOs2)
241 return VERR_NO_MEMORY;
242
243 /* there is no allocation here, right? it needs to be mapped somewhere first. */
244 pMemOs2->Core.u.Phys.fAllocated = false;
245 pMemOs2->Core.u.Phys.PhysBase = Phys;
246 pMemOs2->Core.u.Phys.uCachePolicy = uCachePolicy;
247 *ppMem = &pMemOs2->Core;
248 return VINF_SUCCESS;
249}
250
251
252DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
253 RTR0PROCESS R0Process)
254{
255 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
256
257 /* create the object. */
258 const ULONG cPages = cb >> PAGE_SHIFT;
259 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
260 if (!pMemOs2)
261 return VERR_NO_MEMORY;
262
263 /* lock it. */
264 ULONG cPagesRet = cPages;
265 int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
266 (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
267 if (!rc)
268 {
269 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
270 Assert(cb == pMemOs2->Core.cb);
271 Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
272 pMemOs2->Core.u.Lock.R0Process = R0Process;
273 *ppMem = &pMemOs2->Core;
274 return VINF_SUCCESS;
275 }
276 rtR0MemObjDelete(&pMemOs2->Core);
277 return RTErrConvertFromOS2(rc);
278}
279
280
281DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
282{
283 /* create the object. */
284 const ULONG cPages = cb >> PAGE_SHIFT;
285 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
286 if (!pMemOs2)
287 return VERR_NO_MEMORY;
288
289 /* lock it. */
290 ULONG cPagesRet = cPages;
291 int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
292 pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
293 if (!rc)
294 {
295 rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
296 pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
297 *ppMem = &pMemOs2->Core;
298 return VINF_SUCCESS;
299 }
300 rtR0MemObjDelete(&pMemOs2->Core);
301 return RTErrConvertFromOS2(rc);
302}
303
304
305DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
306{
307 return VERR_NOT_SUPPORTED;
308}
309
310
311DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
312 RTR0PROCESS R0Process)
313{
314 return VERR_NOT_SUPPORTED;
315}
316
317
/**
 * Maps an existing memory object into kernel space.
 *
 * On OS/2 the backing objects already live at a fixed kernel address, so no
 * new mapping is created: a dummy MAPPING object is returned that aliases the
 * ring-0 address of the source object.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new mapping object on success.
 * @param   pMemToMap   The object to map.
 * @param   pvFixed     Must be (void *)-1 (no fixed-address support).
 * @param   uAlignment  Requested alignment; anything above PAGE_SIZE is refused.
 * @param   fProt       RTMEM_PROT_XXX protection flags.
 * @param   offSub      Must be 0 (sub-range mapping unsupported).
 * @param   cbSub       Must be 0 (sub-range mapping unsupported).
 */
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;


/** @todo finish the implementation. */

    /* Resolve the ring-0 address of the source object. */
    int rc;
    void *pvR0 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These has kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
                /* Cache the mapping on the source object so it is created
                   only once; rtR0MemObjNativeFree releases it. */
                pMemToMapOs2->Core.pv = pvR0;
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            /* Never created by this backend (see rtR0MemObjNativeAllocPhysNC). */
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_3;
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Create a dummy mapping object for it.
     *
     * All mappings are read/write/execute in OS/2 and there isn't
     * any cache options, so sharing is ok. And the main memory object
     * isn't actually freed until all the mappings have been freed up
     * (reference counting).
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR0, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        /* NIL_RTR0PROCESS marks this as a kernel (dummy) mapping so
           rtR0MemObjNativeFree knows there is nothing to release. */
        pMemOs2->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
397
398
/**
 * Maps an existing memory object into the current user process.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new mapping object on success.
 * @param   pMemToMap   The object to map.
 * @param   R3PtrFixed  Must be (RTR3PTR)-1 (no fixed-address support).
 * @param   uAlignment  Requested alignment; anything above PAGE_SIZE is refused.
 * @param   fProt       RTMEM_PROT_XXX protection flags.
 * @param   R0Process   Must be the current process (asserted).
 */
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* Resolve the ring-0 address of the source object. */
    int rc;
    void *pvR0;
    void *pvR3 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These has kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
#if 0/* this is wrong. */
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
            }
            break;
#endif
            /* Mapping PHYS objects into a process is not implemented. */
            return VERR_NOT_SUPPORTED;

        case RTR0MEMOBJTYPE_PHYS_NC:
            /* Never created by this backend (see rtR0MemObjNativeAllocPhysNC). */
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_5;
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Map the ring-0 memory into the current process.
     */
    if (!pvR3)
    {
        Assert(pvR0);
        ULONG flFlags = 0;
        /* NOTE(review): VMDHGP_4MB is requested when uAlignment == PAGE_SIZE,
           which looks inverted for a page-granular request - confirm against
           the DevHlp_VMGlobalToProcess flag semantics. */
        if (uAlignment == PAGE_SIZE)
            flFlags |= VMDHGP_4MB;
        if (fProt & RTMEM_PROT_WRITE)
            flFlags |= VMDHGP_WRITE;
        rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
        if (rc)
            return RTErrConvertFromOS2(rc);
    }
    Assert(pvR3);

    /*
     * Create a mapping object for it.
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR3, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        Assert(pMemOs2->Core.pv == pvR3);
        /* Recording the owner process tells rtR0MemObjNativeFree to
           KernVMFree this mapping. */
        pMemOs2->Core.u.Mapping.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    /* Object allocation failed: undo the process mapping. */
    KernVMFree(pvR3);
    return VERR_NO_MEMORY;
}
487
488
489DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
490{
491 NOREF(pMem);
492 NOREF(offSub);
493 NOREF(cbSub);
494 NOREF(fProt);
495 return VERR_NOT_SUPPORTED;
496}
497
498
499DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
500{
501 PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
502
503 switch (pMemOs2->Core.enmType)
504 {
505 case RTR0MEMOBJTYPE_PAGE:
506 case RTR0MEMOBJTYPE_LOW:
507 case RTR0MEMOBJTYPE_LOCK:
508 case RTR0MEMOBJTYPE_PHYS_NC:
509 return pMemOs2->aPages[iPage].Addr;
510
511 case RTR0MEMOBJTYPE_CONT:
512 return pMemOs2->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
513
514 case RTR0MEMOBJTYPE_PHYS:
515 return pMemOs2->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
516
517 case RTR0MEMOBJTYPE_RES_VIRT:
518 case RTR0MEMOBJTYPE_MAPPING:
519 default:
520 return NIL_RTHCPHYS;
521 }
522}
523
524
/**
 * Expands the page list so we can index pages directly.
 *
 * KernLinToPageList/KernVMLock may return fewer entries than pages when
 * physically contiguous runs are coalesced into one entry with a larger
 * Size.  This routine expands such runs in place - working backwards from
 * the end of the array so no entry is overwritten before it is read - until
 * every slot describes exactly one PAGE_SIZE page.
 *
 * @param   paPages The page list array to fix.
 * @param   cPages The number of pages that's supposed to go into the list.
 * @param   cPagesRet The actual number of pages in the list.
 */
static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet)
{
    Assert(cPages >= cPagesRet);
    if (cPages != cPagesRet)
    {
        /* iIn walks the packed entries backwards; iOut walks the expanded
           slots backwards.  iOut >= iIn always holds, so reads stay ahead
           of writes. */
        ULONG iIn = cPagesRet;
        ULONG iOut = cPages;
        do
        {
            iIn--;
            iOut--;
            Assert(iIn <= iOut);

            /* Copy the entry before writing: iIn may equal iOut. */
            KernPageList_t Page = paPages[iIn];
            Assert(!(Page.Addr & PAGE_OFFSET_MASK));
            Assert(Page.Size == RT_ALIGN_Z(Page.Size, PAGE_SIZE));

            if (Page.Size > PAGE_SIZE)
            {
                /* Emit the run's pages from highest address downwards,
                   leaving the first page for the common tail below. */
                do
                {
                    Page.Size -= PAGE_SIZE;
                    paPages[iOut].Addr = Page.Addr + Page.Size;
                    paPages[iOut].Size = PAGE_SIZE;
                    iOut--;
                } while (Page.Size > PAGE_SIZE);
            }

            /* First (or only) page of the entry. */
            paPages[iOut].Addr = Page.Addr;
            paPages[iOut].Size = PAGE_SIZE;
        } while (   iIn != iOut
                 && iIn > 0);
    }
}
566
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette