VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp@ 3225

Last change on this file since 3225 was 3225, checked in by vboxsync, 17 years ago

RTR0MemObjMapKernel for OS/2.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.8 KB
/* $Id: memobj-r0drv-os2.cpp 3225 2007-06-22 04:37:40Z vboxsync $ */
/** @file
 * innotek Portable Runtime - Ring-0 Memory Objects, OS/2.
 */

/*
 * Copyright (c) 2007 knut st. osmundsen <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-os2-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The OS/2 version of the memory object structure.
 */
typedef struct RTR0MEMOBJOS2
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Lock for the ring-3 / ring-0 pinned objects.
     * This member might not be allocated for some object types. */
    KernVMLock_t        Lock;
    /** Array of physical pages.
     * This array can be 0 in length for some object types. */
    KernPageList_t      aPages[1];
} RTR0MEMOBJOS2, *PRTR0MEMOBJOS2;
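/* Note that the structure is allocated with a variable sized tail: the workers
 * below pass RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]) or
 * RT_OFFSETOF(RTR0MEMOBJOS2, Lock) to rtR0MemObjNew, so an object only pays
 * for the lock record and the page list when its type actually uses them. */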


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet);


int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;
    int rc;

    switch (pMemOs2->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS:
            if (!pMemOs2->Core.pv)
                break;

        case RTR0MEMOBJTYPE_MAPPING:
            if (pMemOs2->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
                break;

            /* fall thru */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            rc = KernVMFree(pMemOs2->Core.pv);
            AssertMsg(!rc, ("rc=%d type=%d pv=%p cb=%#zx\n", rc, pMemOs2->Core.enmType, pMemOs2->Core.pv, pMemOs2->Core.cb));
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rc = KernVMUnlock(&pMemOs2->Lock);
            AssertMsg(!rc, ("rc=%d\n", rc));
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
    if (!rc)
    {
        ULONG cPagesRet = cPages;
        rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
        if (!rc)
        {
            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
            *ppMem = &pMemOs2->Core;
            return VINF_SUCCESS;
        }
        KernVMFree(pMemOs2->Core.pv);
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
    if (!rc)
    {
        ULONG cPagesRet = cPages;
        rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
        if (!rc)
        {
            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
            *ppMem = &pMemOs2->Core;
            return VINF_SUCCESS;
        }
        KernVMFree(pMemOs2->Core.pv);
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc)
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Cont.Phys = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHighest=%VHp\n", PhysHighest), VERR_NOT_IMPLEMENTED);

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc)
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Phys.fAllocated = true;
        pMemOs2->Core.u.Phys.PhysBase = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* there is no allocation here, right? it needs to be mapped somewhere first. */
    pMemOs2->Core.u.Phys.fAllocated = false;
    pMemOs2->Core.u.Phys.PhysBase = Phys;
    *ppMem = &pMemOs2->Core;
    return VINF_SUCCESS;
}


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* lock it. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | VMDHL_WRITE, pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc)
    {
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        Assert(cb == pMemOs2->Core.cb);
        Assert(pv == pMemOs2->Core.pv);
        pMemOs2->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* lock it. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | VMDHL_WRITE, pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc)
    {
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

/** @todo finish the implementation. */

    int rc;
    void *pvR0 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
            if (!pvR0)
            {
                /* no ring-0 mapping yet, so create one. */
                AssertMsgReturn(uAlignment == PAGE_SIZE, ("%#zx\n", uAlignment), VERR_NOT_SUPPORTED);
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS/* | VMDHA_SHARED?*/, &pvR0, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
                pMemToMapOs2->Core.pv = pvR0;
            }
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Create a dummy mapping object for it.
     *
     * All mappings are read/write/execute in OS/2 and there are no cache
     * options, so sharing is ok. And the main memory object isn't actually
     * freed until all the mappings have been freed up (reference counting).
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR0, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        pMemOs2->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}


int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    int rc;
    void *pvR0;
    void *pvR3 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
#if 0 /* this is wrong. */
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(uAlignment == PAGE_SIZE, ("%#zx\n", uAlignment), VERR_NOT_SUPPORTED);
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
            }
            break;
#endif
            return VERR_NOT_SUPPORTED;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Map the ring-0 memory into the current process.
     */
    if (!pvR3)
    {
        Assert(pvR0);
        ULONG flFlags = 0;
        if (uAlignment == PAGE_SIZE)
            flFlags |= VMDHGP_4MB;
        if (fProt & RTMEM_PROT_WRITE)
            flFlags |= VMDHGP_WRITE;
        rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
        if (rc)
            return RTErrConvertFromOS2(rc);
    }
    Assert(pvR3);

    /*
     * Create a mapping object for it.
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR3, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        Assert(pMemOs2->Core.pv == pvR3);
        pMemOs2->Core.u.Mapping.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    KernVMFree(pvR3);
    return VERR_NO_MEMORY;
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, unsigned iPage)
{
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)pMem;

    switch (pMemOs2->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
            return pMemOs2->aPages[iPage].Addr;

        case RTR0MEMOBJTYPE_CONT:
            return pMemOs2->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemOs2->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            return NIL_RTHCPHYS;
    }
}


/**
 * Expands the page list so we can index pages directly.
 *
 * @param   paPages     The page list array to fix.
 * @param   cPages      The number of pages that's supposed to go into the list.
 * @param   cPagesRet   The actual number of pages in the list.
 */
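/*
 * For illustration: with cPages = 4 and KernLinToPageList returning
 * cPagesRet = 2 run entries { Addr = 0x00101000, Size = 0x2000 } and
 * { Addr = 0x00205000, Size = 0x2000 }, the loop below expands the list in
 * place, working from the end, into four PAGE_SIZE entries at 0x00101000,
 * 0x00102000, 0x00205000 and 0x00206000, so that
 * rtR0MemObjNativeGetPagePhysAddr() can index aPages[iPage] directly.
 */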
static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet)
{
    Assert(cPages >= cPagesRet);
    if (cPages != cPagesRet)
    {
        ULONG iIn  = cPagesRet;
        ULONG iOut = cPages;
        do
        {
            iIn--;
            iOut--;
            Assert(iIn <= iOut);

            KernPageList_t Page = paPages[iIn];
            Assert(!(Page.Addr & PAGE_OFFSET_MASK));
            Assert(Page.Size == RT_ALIGN_Z(Page.Size, PAGE_SIZE));

            if (Page.Size > PAGE_SIZE)
            {
                do
                {
                    Page.Size -= PAGE_SIZE;
                    paPages[iOut].Addr = Page.Addr + Page.Size;
                    paPages[iOut].Size = PAGE_SIZE;
                    iOut--;
                } while (Page.Size > PAGE_SIZE);
            }

            paPages[iOut].Addr = Page.Addr;
            paPages[iOut].Size = PAGE_SIZE;
        } while (   iIn != iOut
                 && iIn > 0);
    }
}

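A minimal usage sketch, for orientation only, of the public ring-0 API these native workers back. The RTR0MemObj* entry points are declared in iprt/memobj.h; the exact signatures in a tree of this vintage may differ slightly, and the function name exampleAllocAndQuery is purely illustrative.

#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/err.h>

static int exampleAllocAndQuery(void)
{
    RTR0MEMOBJ hMemObj;
    /* Allocate one page of non-executable, page-aligned kernel memory
       (this ends up in rtR0MemObjNativeAllocPage above on OS/2). */
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void    *pv   = RTR0MemObjAddress(hMemObj);            /* ring-0 address */
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0); /* first page */
        NOREF(pv); NOREF(Phys);

        /* Free the object and any mappings created from it. */
        rc = RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    }
    return rc;
}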