VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@4725

Last change on this file was revision 4725, checked in by vboxsync, 18 years ago

Use MmSecureVirtualMemory to have some limited protection with usermode allocations

  • Property svn:keywords set to Id
File size: 25.3 KB
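
The commit message above refers to the user-mode locking path: MmProbeAndLockPages pins the pages, while MmSecureVirtualMemory additionally prevents the owning process from freeing or re-protecting the range while the driver holds it. A minimal sketch of that pairing, independent of the IPRT structures in the file below (the function name and buffer parameters are illustrative assumptions, not part of this file):

#include <ntddk.h>   /* assumed here; the file itself pulls in "the-nt-kernel.h" */

/* Sketch: secure a user buffer, lock it, then tear everything down in reverse order. */
static NTSTATUS ExampleSecureAndLock(PVOID pvUser, SIZE_T cbUser)
{
    NTSTATUS rcNt    = STATUS_UNSUCCESSFUL;
    HANDLE   hSecure = MmSecureVirtualMemory(pvUser, cbUser, PAGE_READWRITE);
    if (hSecure)
    {
        PMDL pMdl = IoAllocateMdl(pvUser, (ULONG)cbUser, FALSE, FALSE, NULL);
        if (pMdl)
        {
            __try
            {
                MmProbeAndLockPages(pMdl, UserMode, IoModifyAccess);
                /* ... the pages are now resident and pinned ... */
                MmUnlockPages(pMdl);
                rcNt = STATUS_SUCCESS;
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                rcNt = GetExceptionCode();
            }
            IoFreeMdl(pMdl);
        }
        MmUnsecureVirtualMemory(hSecure);
    }
    return rcNt;
}
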
1/* $Id: memobj-r0drv-nt.cpp 4725 2007-09-12 10:12:31Z vboxsync $ */
2/** @file
3 * innotek Portable Runtime - Ring-0 Memory Objects, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#include "the-nt-kernel.h"
23
24#include <iprt/memobj.h>
25#include <iprt/alloc.h>
26#include <iprt/assert.h>
27#include <iprt/log.h>
28#include <iprt/param.h>
29#include <iprt/string.h>
30#include <iprt/process.h>
31#include "internal/memobj.h"
32
33
34/*******************************************************************************
35* Defined Constants And Macros *
36*******************************************************************************/
37/** Maximum number of bytes we try to lock down in one go.
38 * The limit is supposed to be just below 256MB, but in practice it appears
39 * to be much lower. The values here have been determined experimentally.
40 */
41#ifdef RT_ARCH_X86
42# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
43#endif
44#ifdef RT_ARCH_AMD64
45# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
46#endif
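
For orientation, this constant drives the chunking in rtR0MemObjNtLock() further down: a lock request is split into MDLs of at most MAX_LOCK_MEM_SIZE bytes each. Worked through for an assumed 100MB request on x86 (32MB chunks):

size_t cb    = 100*1024*1024;            /* hypothetical lock request */
size_t cMdls = cb / MAX_LOCK_MEM_SIZE;   /* 3 full 32MB chunks */
if (cb % MAX_LOCK_MEM_SIZE)
    cMdls++;                             /* + one 4MB tail chunk -> 4 MDLs */
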
47
48
49/*******************************************************************************
50* Structures and Typedefs *
51*******************************************************************************/
52/**
53 * The NT version of the memory object structure.
54 */
55typedef struct RTR0MEMOBJNT
56{
57 /** The core structure. */
58 RTR0MEMOBJINTERNAL Core;
59#ifndef IPRT_TARGET_NT4
60 /** Set if the pages were allocated with MmAllocatePagesForMdl(). */
61 bool fAllocatedPagesForMdl;
62#endif
63 /** Pointer returned by MmSecureVirtualMemory */
64 PVOID pvSecureMem;
65 /** The number of PMDLs (memory descriptor lists) in the array. */
66 uint32_t cMdls;
67 /** Array of MDL pointers. (variable size) */
68 PMDL apMdls[1];
69} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
70
71
72int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
73{
74 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
75
76 /*
77 * Deal with it on a per-type basis (just as a variation).
78 */
79 switch (pMemNt->Core.enmType)
80 {
81 case RTR0MEMOBJTYPE_LOW:
82#ifndef IPRT_TARGET_NT4
83 if (pMemNt->fAllocatedPagesForMdl)
84 {
85 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
86 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
87 pMemNt->Core.pv = NULL;
88 if (pMemNt->pvSecureMem)
89 {
90 MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
91 pMemNt->pvSecureMem = NULL;
92 }
93
94 MmFreePagesFromMdl(pMemNt->apMdls[0]);
95 ExFreePool(pMemNt->apMdls[0]);
96 pMemNt->apMdls[0] = NULL;
97 pMemNt->cMdls = 0;
98 break;
99 }
100#endif
101 AssertFailed();
102 break;
103
104 case RTR0MEMOBJTYPE_PAGE:
105 Assert(pMemNt->Core.pv);
106 ExFreePool(pMemNt->Core.pv);
107 pMemNt->Core.pv = NULL;
108
109 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
110 IoFreeMdl(pMemNt->apMdls[0]);
111 pMemNt->apMdls[0] = NULL;
112 pMemNt->cMdls = 0;
113 break;
114
115 case RTR0MEMOBJTYPE_CONT:
116 Assert(pMemNt->Core.pv);
117 MmFreeContiguousMemory(pMemNt->Core.pv);
118 pMemNt->Core.pv = NULL;
119
120 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
121 IoFreeMdl(pMemNt->apMdls[0]);
122 pMemNt->apMdls[0] = NULL;
123 pMemNt->cMdls = 0;
124 break;
125
126 case RTR0MEMOBJTYPE_PHYS:
127 case RTR0MEMOBJTYPE_PHYS_NC:
128#ifndef IPRT_TARGET_NT4
129 if (pMemNt->fAllocatedPagesForMdl)
130 {
131 MmFreePagesFromMdl(pMemNt->apMdls[0]);
132 ExFreePool(pMemNt->apMdls[0]);
133 pMemNt->apMdls[0] = NULL;
134 pMemNt->cMdls = 0;
135 break;
136 }
137#endif
138 AssertFailed();
139 break;
140
141 case RTR0MEMOBJTYPE_LOCK:
142 if (pMemNt->pvSecureMem)
143 {
144 MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
145 pMemNt->pvSecureMem = NULL;
146 }
147 for (uint32_t i = 0; i < pMemNt->cMdls; i++)
148 {
149 MmUnlockPages(pMemNt->apMdls[i]);
150 IoFreeMdl(pMemNt->apMdls[i]);
151 pMemNt->apMdls[i] = NULL;
152 }
153 break;
154
155 case RTR0MEMOBJTYPE_RES_VIRT:
156/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
157 {
158 }
159 else
160 {
161 }*/
162 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
163 return VERR_INTERNAL_ERROR;
164 break;
165
166 case RTR0MEMOBJTYPE_MAPPING:
167 {
168 Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
169 PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
170 Assert(pMemNtParent);
171 if (pMemNtParent->cMdls)
172 {
173 Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
174 Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
175 || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
176 MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
177 }
178 else
179 {
180 Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
181 && !pMemNtParent->Core.u.Phys.fAllocated);
182 Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
183 MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
184 }
185 pMemNt->Core.pv = NULL;
186 break;
187 }
188
189 default:
190 AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
191 return VERR_INTERNAL_ERROR;
192 }
193
194 return VINF_SUCCESS;
195}
196
197
198int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
199{
200 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
201
202 /*
203 * Try to allocate the memory and create an MDL for it so
204 * we can query the physical addresses and do mappings later
205 * without running into out-of-memory conditions and similar problems.
206 */
207 int rc = VERR_NO_PAGE_MEMORY;
208 void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
209 if (pv)
210 {
211 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
212 if (pMdl)
213 {
214 MmBuildMdlForNonPagedPool(pMdl);
215#ifdef RT_ARCH_AMD64
216 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
217#endif
218
219 /*
220 * Create the IPRT memory object.
221 */
222 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
223 if (pMemNt)
224 {
225 pMemNt->cMdls = 1;
226 pMemNt->apMdls[0] = pMdl;
227 *ppMem = &pMemNt->Core;
228 return VINF_SUCCESS;
229 }
230
231 rc = VERR_NO_MEMORY;
232 IoFreeMdl(pMdl);
233 }
234 ExFreePool(pv);
235 }
236 return rc;
237}
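
The MDL built in rtR0MemObjNativeAllocPage() above is what makes the later per-page physical address queries cheap (see rtR0MemObjNativeGetPagePhysAddr() near the end of the file). A small sketch of such a query against an already built MDL; the variable names are the editor's, not from this file:

/* pMdl: an MDL describing non-paged (or locked) memory, as built above. */
PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
ULONG       cPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(pMdl),
                                                    MmGetMdlByteCount(pMdl));
for (ULONG iPage = 0; iPage < cPages; iPage++)
{
    PHYSICAL_ADDRESS PhysPage;
    PhysPage.QuadPart = (LONGLONG)paPfns[iPage] << PAGE_SHIFT;
    /* PhysPage is the host physical address of page iPage of the allocation. */
}
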
238
239
240int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
241{
242 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
243
244 /*
245 * Try to see if we get lucky first...
246 * (We could probably just assume we're lucky on NT4.)
247 */
248 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
249 if (RT_SUCCESS(rc))
250 {
251 size_t iPage = cb >> PAGE_SHIFT;
252 while (iPage-- > 0)
253 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
254 {
255 rc = VERR_NO_MEMORY;
256 break;
257 }
258 if (RT_SUCCESS(rc))
259 return rc;
260
261 /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
262 RTR0MemObjFree(*ppMem, false);
263 *ppMem = NULL;
264 }
265
266#ifndef IPRT_TARGET_NT4
267 /*
268 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
269 */
270 PHYSICAL_ADDRESS Zero;
271 Zero.QuadPart = 0;
272 PHYSICAL_ADDRESS HighAddr;
273 HighAddr.QuadPart = _4G - 1;
274 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
275 if (pMdl)
276 {
277 if (MmGetMdlByteCount(pMdl) >= cb)
278 {
279 __try
280 {
281 void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
282 FALSE /* no bug check on failure */, NormalPagePriority);
283 if (pv)
284 {
285 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
286 if (pMemNt)
287 {
288 pMemNt->fAllocatedPagesForMdl = true;
289 pMemNt->cMdls = 1;
290 pMemNt->apMdls[0] = pMdl;
291 *ppMem = &pMemNt->Core;
292 return VINF_SUCCESS;
293 }
294 MmUnmapLockedPages(pv, pMdl);
295 }
296 }
297 __except(EXCEPTION_EXECUTE_HANDLER)
298 {
299 /* nothing */
300 }
301 }
302 MmFreePagesFromMdl(pMdl);
303 ExFreePool(pMdl);
304 }
305#endif /* !IPRT_TARGET_NT4 */
306
307 /*
308 * Fall back on contiguous memory...
309 */
310 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
311}
312
313
314/**
315 * Internal worker for rtR0MemObjNativeAllocCont() and rtR0MemObjNativeAllocPhys()
316 * that takes a maximum physical address in addition to the parameters
317 * that rtR0MemObjNativeAllocCont() takes.
318 *
319 * @returns IPRT status code.
320 * @param ppMem Where to store the pointer to the ring-0 memory object.
321 * @param cb The size.
322 * @param fExecutable Whether the mapping should be executable or not.
323 * @param PhysHighest The highest physical address allowed for the pages in the allocation.
324 */
325static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
326{
327 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
328
329 /*
330 * Allocate the memory and create an MDL for it.
331 */
332 PHYSICAL_ADDRESS PhysAddrHighest;
333 PhysAddrHighest.QuadPart = PhysHighest;
334 void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
335 if (!pv)
336 return VERR_NO_MEMORY;
337
338 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
339 if (pMdl)
340 {
341 MmBuildMdlForNonPagedPool(pMdl);
342#ifdef RT_ARCH_AMD64
343 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
344#endif
345
346 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
347 if (pMemNt)
348 {
349 pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
350 pMemNt->cMdls = 1;
351 pMemNt->apMdls[0] = pMdl;
352 *ppMem = &pMemNt->Core;
353 return VINF_SUCCESS;
354 }
355
356 IoFreeMdl(pMdl);
357 }
358 MmFreeContiguousMemory(pv);
359 return VERR_NO_MEMORY;
360}
361
362
363int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
364{
365 return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1);
366}
367
368
369int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
370{
371#ifndef IPRT_TARGET_NT4
372 /*
373 * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
374 *
375 * This is preferable to using MmAllocateContiguousMemory because there are
376 * a few situations where the memory shouldn't be mapped, like for instance
377 * VT-x control memory. Since these are rather small allocations (one or
378 * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
379 * request.
380 *
381 * If the allocation is big, the chances of a contiguous run are probably not
382 * very good; the current 128KB cut-off is somewhat arbitrary.
383 */
384 if (cb < _128K)
385 {
386 PHYSICAL_ADDRESS Zero;
387 Zero.QuadPart = 0;
388 PHYSICAL_ADDRESS HighAddr;
389 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
390 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
391 if (pMdl)
392 {
393 if (MmGetMdlByteCount(pMdl) >= cb)
394 {
395 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
396 PFN_NUMBER Pfn = paPfns[0] + 1;
397 const size_t cPages = cb >> PAGE_SHIFT;
398 size_t iPage;
399 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
400 if (paPfns[iPage] != Pfn)
401 break;
402 if (iPage >= cPages)
403 {
404 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
405 if (pMemNt)
406 {
407 pMemNt->Core.u.Phys.fAllocated = true;
408 pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
409 pMemNt->fAllocatedPagesForMdl = true;
410 pMemNt->cMdls = 1;
411 pMemNt->apMdls[0] = pMdl;
412 *ppMem = &pMemNt->Core;
413 return VINF_SUCCESS;
414 }
415 }
416 }
417 MmFreePagesFromMdl(pMdl);
418 ExFreePool(pMdl);
419 }
420 }
421#endif /* !IPRT_TARGET_NT4 */
422
423 return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
424}
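
The PFN walk above is the essential trick in rtR0MemObjNativeAllocPhys(): MmAllocatePagesForMdl() makes no contiguity promise, so the result is only kept when the PFNs happen to form one ascending run. The same check pulled out as a stand-alone helper for clarity (the helper name is the editor's; it does not exist in this file):

/* Returns true if paPfns[0..cPages-1] describe one physically contiguous range. */
static bool rtMemObjNtIsPfnRunContiguous(PPFN_NUMBER paPfns, size_t cPages)
{
    for (size_t iPage = 1; iPage < cPages; iPage++)
        if (paPfns[iPage] != paPfns[0] + iPage)
            return false;
    return true;
}
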
425
426
427int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
428{
429#ifndef IPRT_TARGET_NT4
430 PHYSICAL_ADDRESS Zero;
431 Zero.QuadPart = 0;
432 PHYSICAL_ADDRESS HighAddr;
433 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
434 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
435 if (pMdl)
436 {
437 if (MmGetMdlByteCount(pMdl) >= cb)
438 {
439 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
440 if (pMemNt)
441 {
442 pMemNt->fAllocatedPagesForMdl = true;
443 pMemNt->cMdls = 1;
444 pMemNt->apMdls[0] = pMdl;
445 *ppMem = &pMemNt->Core;
446 return VINF_SUCCESS;
447 }
448 }
449 MmFreePagesFromMdl(pMdl);
450 ExFreePool(pMdl);
451 }
452 return VERR_NO_MEMORY;
453#else /* IPRT_TARGET_NT4 */
454 return VERR_NOT_SUPPORTED;
455#endif /* IPRT_TARGET_NT4 */
456}
457
458
459int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
460{
461 /*
462 * Validate the address range and create a descriptor for it.
463 */
464 PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
465 if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
466 return VERR_ADDRESS_TOO_BIG;
467
468 /*
469 * Create the IPRT memory object.
470 */
471 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
472 if (pMemNt)
473 {
474 pMemNt->Core.u.Phys.PhysBase = Phys;
475 pMemNt->Core.u.Phys.fAllocated = false;
476 *ppMem = &pMemNt->Core;
477 return VINF_SUCCESS;
478 }
479 return VERR_NO_MEMORY;
480}
481
482
483/**
484 * Internal worker for locking down pages.
485 *
486 * @return IPRT status code.
487 *
488 * @param ppMem Where to store the memory object pointer.
489 * @param pv First page.
490 * @param cb Number of bytes.
491 * @param R0Process The process \a pv and \a cb refer to; NIL_RTR0PROCESS for kernel memory.
492 */
493static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
494{
495 /*
496 * Calc the number of MDLs we need and allocate the memory object structure.
497 */
498 size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
499 if (cb % MAX_LOCK_MEM_SIZE)
500 cMdls++;
501 if (cMdls >= UINT32_MAX)
502 return VERR_OUT_OF_RANGE;
503 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
504 RTR0MEMOBJTYPE_LOCK, pv, cb);
505 if (!pMemNt)
506 return VERR_NO_MEMORY;
507
508 /*
509 * Loop locking down the sub parts of the memory.
510 */
511 int rc = VINF_SUCCESS;
512 size_t cbTotal = 0;
513 uint8_t *pb = (uint8_t *)pv;
514 uint32_t iMdl;
515 for (iMdl = 0; iMdl < cMdls; iMdl++)
516 {
517 /*
518 * Calc the Mdl size and allocate it.
519 */
520 size_t cbCur = cb - cbTotal;
521 if (cbCur > MAX_LOCK_MEM_SIZE)
522 cbCur = MAX_LOCK_MEM_SIZE;
523 AssertMsg(cbCur, ("cbCur: 0!\n"));
524 PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
525 if (!pMdl)
526 {
527 rc = VERR_NO_MEMORY;
528 break;
529 }
530
531 /*
532 * Lock the pages.
533 */
534 __try
535 {
536 MmProbeAndLockPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode, IoModifyAccess);
537
538 pMemNt->apMdls[iMdl] = pMdl;
539 pMemNt->cMdls++;
540 }
541 __except(EXCEPTION_EXECUTE_HANDLER)
542 {
543 IoFreeMdl(pMdl);
544 rc = VERR_LOCK_FAILED;
545 break;
546 }
547
548 if (R0Process != NIL_RTR0PROCESS && !pMemNt->pvSecureMem)
549 {
550 /* Make sure the user process can't change the allocation; secure the whole range once. */
551 pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb, PAGE_READWRITE);
552 if (!pMemNt->pvSecureMem)
553 {
554 rc = VERR_NO_MEMORY;
555 break;
556 }
557 }
558
559 /* next */
560 cbTotal += cbCur;
561 pb += cbCur;
562 }
563 if (RT_SUCCESS(rc))
564 {
565 Assert(pMemNt->cMdls == cMdls);
566 pMemNt->Core.u.Lock.R0Process = R0Process;
567 *ppMem = &pMemNt->Core;
568 return rc;
569 }
570
571 /*
572 * We failed, perform cleanups.
573 */
574 while (iMdl-- > 0)
575 {
576 MmUnlockPages(pMemNt->apMdls[iMdl]);
577 IoFreeMdl(pMemNt->apMdls[iMdl]);
578 pMemNt->apMdls[iMdl] = NULL;
579 }
580 if (pMemNt->pvSecureMem)
581 {
582 MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
583 pMemNt->pvSecureMem = NULL;
584 }
585
586 rtR0MemObjDelete(&pMemNt->Core);
587 return rc;
588}
589
590
591int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
592{
593 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
594 /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
595 return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
596}
597
598
599int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
600{
601 return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
602}
603
604
605int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
606{
607 /*
608 * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
609 */
610 return VERR_NOT_IMPLEMENTED;
611}
612
613
614int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
615{
616 /*
617 * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
618 */
619 return VERR_NOT_IMPLEMENTED;
620}
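
Both reservation TODOs above point at the same section-based approach: create a SEC_RESERVE section and map a view of it, into system space for the kernel variant or into the calling process for the user variant. A rough sketch of the user-space variant as the comment suggests it; this is an assumption about how the TODO might be filled in, not VirtualBox code, and the error mapping is only a placeholder:

static int ExampleReserveUser(void **ppv, size_t cb)
{
    HANDLE        hSection = NULL;
    LARGE_INTEGER MaxSize;
    MaxSize.QuadPart = cb;
    NTSTATUS rcNt = ZwCreateSection(&hSection, SECTION_MAP_READ | SECTION_MAP_WRITE,
                                    NULL /*ObjectAttributes*/, &MaxSize,
                                    PAGE_READWRITE, SEC_RESERVE, NULL /*FileHandle*/);
    if (NT_SUCCESS(rcNt))
    {
        PVOID  pv     = NULL;
        SIZE_T cbView = 0;                     /* 0 = map the entire section */
        rcNt = ZwMapViewOfSection(hSection, ZwCurrentProcess(), &pv, 0 /*ZeroBits*/,
                                  0 /*CommitSize*/, NULL /*SectionOffset*/, &cbView,
                                  ViewUnmap, 0 /*AllocationType*/, PAGE_READWRITE);
        if (NT_SUCCESS(rcNt))
        {
            *ppv = pv;   /* address space is reserved; nothing is committed yet */
            /* Real code would have to keep hSection around for later cleanup. */
            return VINF_SUCCESS;
        }
        ZwClose(hSection);
    }
    return VERR_NO_MEMORY; /* placeholder; a real implementation would translate rcNt */
}
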
621
622
623/**
624 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
625 *
626 * @returns IPRT status code.
627 * @param ppMem Where to store the memory object for the mapping.
628 * @param pMemToMap The memory object to map.
629 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
630 * @param uAlignment The alignment requirement for the mapping.
631 * @param fProt The desired page protection for the mapping.
632 * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
633 * If not nil, it's the current process.
634 */
635static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
636 unsigned fProt, RTR0PROCESS R0Process)
637{
638 int rc = VERR_MAP_FAILED;
639
640 /*
641 * There are two basic cases here, either we've got an MDL and can
642 * map it using MmMapLockedPages, or we've got a contiguous physical
643 * range (MMIO most likely) and can use MmMapIoSpace.
644 */
645 PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
646 if (pMemNtToMap->cMdls)
647 {
648 /* don't attempt to map locked regions with more than one MDL. */
649 if (pMemNtToMap->cMdls != 1)
650 return VERR_NOT_SUPPORTED;
651
652 /* we can't map anything to the first page, sorry. */
653 if (pvFixed == 0)
654 return VERR_NOT_SUPPORTED;
655
656 /* only one system mapping for now - no time to figure out MDL restrictions right now. */
657 if ( pMemNtToMap->Core.uRel.Parent.cMappings
658 && R0Process == NIL_RTR0PROCESS)
659 return VERR_NOT_SUPPORTED;
660
661 __try
662 {
663 /** @todo uAlignment */
664 /** @todo How to set the protection on the pages? */
665 void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
666 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
667 MmCached,
668 pvFixed != (void *)-1 ? pvFixed : NULL,
669 FALSE /* no bug check on failure */,
670 NormalPagePriority);
671 if (pv)
672 {
673 NOREF(fProt);
674
675 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
676 pMemNtToMap->Core.cb);
677 if (pMemNt)
678 {
679 pMemNt->Core.u.Mapping.R0Process = R0Process;
680 *ppMem = &pMemNt->Core;
681 return VINF_SUCCESS;
682 }
683
684 rc = VERR_NO_MEMORY;
685 MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
686 }
687 }
688 __except(EXCEPTION_EXECUTE_HANDLER)
689 {
690 /* nothing */
691 rc = VERR_MAP_FAILED;
692 }
693
694 }
695 else
696 {
697 AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
698 && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
699
700 /* cannot map phys mem to user space (yet). */
701 if (R0Process != NIL_RTR0PROCESS)
702 return VERR_NOT_SUPPORTED;
703
704 /** @todo uAlignment */
705 /** @todo How to set the protection on the pages? */
706 PHYSICAL_ADDRESS Phys;
707 Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
708 void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
709 if (pv)
710 {
711 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
712 pMemNtToMap->Core.cb);
713 if (pMemNt)
714 {
715 pMemNt->Core.u.Mapping.R0Process = R0Process;
716 *ppMem = &pMemNt->Core;
717 return VINF_SUCCESS;
718 }
719
720 rc = VERR_NO_MEMORY;
721 MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
722 }
723 }
724
725 NOREF(uAlignment); NOREF(fProt);
726 return rc;
727}
728
729
730int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
731{
732 return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
733}
734
735
736int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
737{
738 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
739 return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
740}
741
742
743RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
744{
745 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
746
747 if (pMemNt->cMdls)
748 {
749 if (pMemNt->cMdls == 1)
750 {
751 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
752 return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
753 }
754
755 size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
756 size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
757 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
758 return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
759 }
760
761 switch (pMemNt->Core.enmType)
762 {
763 case RTR0MEMOBJTYPE_MAPPING:
764 return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
765
766 case RTR0MEMOBJTYPE_PHYS:
767 return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
768
769 case RTR0MEMOBJTYPE_PAGE:
770 case RTR0MEMOBJTYPE_PHYS_NC:
771 case RTR0MEMOBJTYPE_LOW:
772 case RTR0MEMOBJTYPE_CONT:
773 case RTR0MEMOBJTYPE_LOCK:
774 default:
775 AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
776 case RTR0MEMOBJTYPE_RES_VIRT:
777 return NIL_RTHCPHYS;
778 }
779}
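
For the multi-MDL case in rtR0MemObjNativeGetPagePhysAddr() above, the index arithmetic is easiest to see with concrete numbers. An assumed example on x86 (MAX_LOCK_MEM_SIZE = 32MB, 4KB pages, hence 8192 pages per MDL):

size_t cPagesPerMdl = MAX_LOCK_MEM_SIZE >> PAGE_SHIFT;   /* 8192 pages per MDL */
size_t iPage        = 10000;                             /* page index being queried */
size_t iMdl         = iPage / cPagesPerMdl;              /* 1    -> the second MDL   */
size_t iMdlPfn      = iPage % cPagesPerMdl;              /* 1808 -> PFN index in it  */
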
780