VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@ 29698

Last change on this file since 29698 was 29027, checked in by vboxsync, 15 years ago

RTR0MemObjEnterPhys/rtR0MemObjNativeEnterPhys: Validate the cache policy in the common code. Use uint32_t as parameter type. All native implementations must set the policy member.
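
For context, here is a minimal sketch of the pattern that change describes, assuming the RTMEM_CACHE_POLICY_* values from iprt/memobj.h (only RTMEM_CACHE_POLICY_DONT_CARE actually appears in this file; RTMEM_CACHE_POLICY_MMIO is assumed). The generic wrapper is presumed to live in the common memobj-r0drv.cpp code; the body shown here is illustrative rather than the real implementation. The key point is that the policy is validated once in common code, while each native backend (rtR0MemObjNativeEnterPhys() further down in this file) must store it in the object it creates.

#include <iprt/memobj.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include "internal/memobj.h"

/* Sketch only: common-code validation of the cache policy. */
RTR0DECL(int) RTR0MemObjEnterPhys(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    /* Validate the policy once here instead of in every native backend. */
    AssertReturn(   uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE
                 || uCachePolicy == RTMEM_CACHE_POLICY_MMIO,
                 VERR_INVALID_PARAMETER);

    /* The native backend must set pMem->u.Phys.uCachePolicy itself, as
       rtR0MemObjNativeEnterPhys() does when it creates the object. */
    return rtR0MemObjNativeEnterPhys((PPRTR0MEMOBJINTERNAL)pMemObj, Phys, cb, uCachePolicy);
}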

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 28.4 KB
1/* $Id: memobj-r0drv-nt.cpp 29027 2010-05-04 14:33:41Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/memobj.h>
34#include <iprt/alloc.h>
35#include <iprt/assert.h>
36#include <iprt/log.h>
37#include <iprt/param.h>
38#include <iprt/string.h>
39#include <iprt/process.h>
40#include "internal/memobj.h"
41
42
43/*******************************************************************************
44* Defined Constants And Macros *
45*******************************************************************************/
46/** Maximum number of bytes we try to lock down in one go.
47 * This is supposed to have a limit right below 256MB, but this appears
48 * to actually be much lower. The values here have been determined experimentally.
49 */
50#ifdef RT_ARCH_X86
51# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
52#endif
53#ifdef RT_ARCH_AMD64
54# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
55#endif
56
57
58/*******************************************************************************
59* Structures and Typedefs *
60*******************************************************************************/
61/**
62 * The NT version of the memory object structure.
63 */
64typedef struct RTR0MEMOBJNT
65{
66 /** The core structure. */
67 RTR0MEMOBJINTERNAL Core;
68#ifndef IPRT_TARGET_NT4
69 /** Used MmAllocatePagesForMdl(). */
70 bool fAllocatedPagesForMdl;
71#endif
72 /** Pointer returned by MmSecureVirtualMemory */
73 PVOID pvSecureMem;
74 /** The number of PMDLs (memory descriptor lists) in the array. */
75 uint32_t cMdls;
76 /** Array of MDL pointers. (variable size) */
77 PMDL apMdls[1];
78} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
79
80
81int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
82{
83 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
84
85 /*
86 * Deal with it on a per type basis (just as a variation).
87 */
88 switch (pMemNt->Core.enmType)
89 {
90 case RTR0MEMOBJTYPE_LOW:
91#ifndef IPRT_TARGET_NT4
92 if (pMemNt->fAllocatedPagesForMdl)
93 {
94 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
95 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
96 pMemNt->Core.pv = NULL;
97 if (pMemNt->pvSecureMem)
98 {
99 MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
100 pMemNt->pvSecureMem = NULL;
101 }
102
103 MmFreePagesFromMdl(pMemNt->apMdls[0]);
104 ExFreePool(pMemNt->apMdls[0]);
105 pMemNt->apMdls[0] = NULL;
106 pMemNt->cMdls = 0;
107 break;
108 }
109#endif
110 AssertFailed();
111 break;
112
113 case RTR0MEMOBJTYPE_PAGE:
114 Assert(pMemNt->Core.pv);
115 ExFreePool(pMemNt->Core.pv);
116 pMemNt->Core.pv = NULL;
117
118 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
119 IoFreeMdl(pMemNt->apMdls[0]);
120 pMemNt->apMdls[0] = NULL;
121 pMemNt->cMdls = 0;
122 break;
123
124 case RTR0MEMOBJTYPE_CONT:
125 Assert(pMemNt->Core.pv);
126 MmFreeContiguousMemory(pMemNt->Core.pv);
127 pMemNt->Core.pv = NULL;
128
129 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
130 IoFreeMdl(pMemNt->apMdls[0]);
131 pMemNt->apMdls[0] = NULL;
132 pMemNt->cMdls = 0;
133 break;
134
135 case RTR0MEMOBJTYPE_PHYS:
136 case RTR0MEMOBJTYPE_PHYS_NC:
137#ifndef IPRT_TARGET_NT4
138 if (pMemNt->fAllocatedPagesForMdl)
139 {
140 MmFreePagesFromMdl(pMemNt->apMdls[0]);
141 ExFreePool(pMemNt->apMdls[0]);
142 pMemNt->apMdls[0] = NULL;
143 pMemNt->cMdls = 0;
144 break;
145 }
146#endif
147 AssertFailed();
148 break;
149
150 case RTR0MEMOBJTYPE_LOCK:
151 if (pMemNt->pvSecureMem)
152 {
153 MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
154 pMemNt->pvSecureMem = NULL;
155 }
156 for (uint32_t i = 0; i < pMemNt->cMdls; i++)
157 {
158 MmUnlockPages(pMemNt->apMdls[i]);
159 IoFreeMdl(pMemNt->apMdls[i]);
160 pMemNt->apMdls[i] = NULL;
161 }
162 break;
163
164 case RTR0MEMOBJTYPE_RES_VIRT:
165/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
166 {
167 }
168 else
169 {
170 }*/
171 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
172 return VERR_INTERNAL_ERROR;
173 break;
174
175 case RTR0MEMOBJTYPE_MAPPING:
176 {
177 Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
178 PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
179 Assert(pMemNtParent);
180 if (pMemNtParent->cMdls)
181 {
182 Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
183 Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
184 || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
185 MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
186 }
187 else
188 {
189 Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
190 && !pMemNtParent->Core.u.Phys.fAllocated);
191 Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
192 MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
193 }
194 pMemNt->Core.pv = NULL;
195 break;
196 }
197
198 default:
199 AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
200 return VERR_INTERNAL_ERROR;
201 }
202
203 return VINF_SUCCESS;
204}
205
206
207int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
208{
209 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
210
211 /*
212 * Try allocate the memory and create an MDL for them so
213 * we can query the physical addresses and do mappings later
214 * without running into out-of-memory conditions and similar problems.
215 */
216 int rc = VERR_NO_PAGE_MEMORY;
217 void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
218 if (pv)
219 {
220 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
221 if (pMdl)
222 {
223 MmBuildMdlForNonPagedPool(pMdl);
224#ifdef RT_ARCH_AMD64
225 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
226#endif
227
228 /*
229 * Create the IPRT memory object.
230 */
231 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
232 if (pMemNt)
233 {
234 pMemNt->cMdls = 1;
235 pMemNt->apMdls[0] = pMdl;
236 *ppMem = &pMemNt->Core;
237 return VINF_SUCCESS;
238 }
239
240 rc = VERR_NO_MEMORY;
241 IoFreeMdl(pMdl);
242 }
243 ExFreePool(pv);
244 }
245 return rc;
246}
247
248
249int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
250{
251 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
252
253 /*
254 * Try see if we get lucky first...
255 * (We could probably just assume we're lucky on NT4.)
256 */
257 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
258 if (RT_SUCCESS(rc))
259 {
260 size_t iPage = cb >> PAGE_SHIFT;
261 while (iPage-- > 0)
262 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
263 {
264 rc = VERR_NO_MEMORY;
265 break;
266 }
267 if (RT_SUCCESS(rc))
268 return rc;
269
270 /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
271 RTR0MemObjFree(*ppMem, false);
272 *ppMem = NULL;
273 }
274
275#ifndef IPRT_TARGET_NT4
276 /*
277 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
278 */
279 PHYSICAL_ADDRESS Zero;
280 Zero.QuadPart = 0;
281 PHYSICAL_ADDRESS HighAddr;
282 HighAddr.QuadPart = _4G - 1;
283 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
284 if (pMdl)
285 {
286 if (MmGetMdlByteCount(pMdl) >= cb)
287 {
288 __try
289 {
290 void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
291 FALSE /* no bug check on failure */, NormalPagePriority);
292 if (pv)
293 {
294 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
295 if (pMemNt)
296 {
297 pMemNt->fAllocatedPagesForMdl = true;
298 pMemNt->cMdls = 1;
299 pMemNt->apMdls[0] = pMdl;
300 *ppMem = &pMemNt->Core;
301 return VINF_SUCCESS;
302 }
303 MmUnmapLockedPages(pv, pMdl);
304 }
305 }
306 __except(EXCEPTION_EXECUTE_HANDLER)
307 {
308 NTSTATUS rcNt = GetExceptionCode();
309 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
310 /* nothing */
311 }
312 }
313 MmFreePagesFromMdl(pMdl);
314 ExFreePool(pMdl);
315 }
316#endif /* !IPRT_TARGET_NT4 */
317
318 /*
319 * Fall back on contiguous memory...
320 */
321 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
322}
323
324
325/**
326 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
327 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
328 * to what rtR0MemObjNativeAllocCont() does.
329 *
330 * @returns IPRT status code.
331 * @param ppMem Where to store the pointer to the ring-0 memory object.
332 * @param cb The size.
333 * @param fExecutable Whether the mapping should be executable or not.
334 * @param PhysHighest The highest physical address for the pages in allocation.
335 * @param uAlignment The alignment of the physical memory to allocate.
336 * Supported values are PAGE_SIZE, _2M, _4M and _1G.
337 */
338static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
339 size_t uAlignment)
340{
341 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
342#ifdef TARGET_NT4
343 if (uAlignment != PAGE_SIZE)
344 return VERR_NOT_SUPPORTED;
345#endif
346
347 /*
348 * Allocate the memory and create an MDL for it.
349 */
350 PHYSICAL_ADDRESS PhysAddrHighest;
351 PhysAddrHighest.QuadPart = PhysHighest;
352#ifndef TARGET_NT4
353 PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
354 PhysAddrLowest.QuadPart = 0;
355 PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
356 void *pv = MmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
357#else
358 void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
359#endif
360 if (!pv)
361 return VERR_NO_MEMORY;
362
363 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
364 if (pMdl)
365 {
366 MmBuildMdlForNonPagedPool(pMdl);
367#ifdef RT_ARCH_AMD64
368 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
369#endif
370
371 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
372 if (pMemNt)
373 {
374 pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
375 pMemNt->cMdls = 1;
376 pMemNt->apMdls[0] = pMdl;
377 *ppMem = &pMemNt->Core;
378 return VINF_SUCCESS;
379 }
380
381 IoFreeMdl(pMdl);
382 }
383 MmFreeContiguousMemory(pv);
384 return VERR_NO_MEMORY;
385}
386
387
388int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
389{
390 return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
391}
392
393
394int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
395{
396#ifndef IPRT_TARGET_NT4
397 /*
398 * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
399 *
400 * This is preferable to using MmAllocateContiguousMemory because there are
401 * a few situations where the memory shouldn't be mapped, like for instance
402 * VT-x control memory. Since these are rather small allocations (one or
403 * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
404 * request.
405 *
406 * If the allocation is big, the chances are *probably* not very good. The
407 * current limit is kind of random...
408 */
409 if ( cb < _128K
410 && uAlignment == PAGE_SIZE)
411
412 {
413 PHYSICAL_ADDRESS Zero;
414 Zero.QuadPart = 0;
415 PHYSICAL_ADDRESS HighAddr;
416 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
417 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
418 if (pMdl)
419 {
420 if (MmGetMdlByteCount(pMdl) >= cb)
421 {
422 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
423 PFN_NUMBER Pfn = paPfns[0] + 1;
424 const size_t cPages = cb >> PAGE_SHIFT;
425 size_t iPage;
426 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
427 if (paPfns[iPage] != Pfn)
428 break;
429 if (iPage >= cPages)
430 {
431 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
432 if (pMemNt)
433 {
434 pMemNt->Core.u.Phys.fAllocated = true;
435 pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
436 pMemNt->fAllocatedPagesForMdl = true;
437 pMemNt->cMdls = 1;
438 pMemNt->apMdls[0] = pMdl;
439 *ppMem = &pMemNt->Core;
440 return VINF_SUCCESS;
441 }
442 }
443 }
444 MmFreePagesFromMdl(pMdl);
445 ExFreePool(pMdl);
446 }
447 }
448#endif /* !IPRT_TARGET_NT4 */
449
450 return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
451}
452
453
454int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
455{
456#ifndef IPRT_TARGET_NT4
457 PHYSICAL_ADDRESS Zero;
458 Zero.QuadPart = 0;
459 PHYSICAL_ADDRESS HighAddr;
460 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
461 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
462 if (pMdl)
463 {
464 if (MmGetMdlByteCount(pMdl) >= cb)
465 {
466 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
467 if (pMemNt)
468 {
469 pMemNt->fAllocatedPagesForMdl = true;
470 pMemNt->cMdls = 1;
471 pMemNt->apMdls[0] = pMdl;
472 *ppMem = &pMemNt->Core;
473 return VINF_SUCCESS;
474 }
475 }
476 MmFreePagesFromMdl(pMdl);
477 ExFreePool(pMdl);
478 }
479 return VERR_NO_MEMORY;
480#else /* IPRT_TARGET_NT4 */
481 return VERR_NOT_SUPPORTED;
482#endif /* IPRT_TARGET_NT4 */
483}
484
485
486int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
487{
488 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_IMPLEMENTED);
489
490 /*
491 * Validate the address range and create a descriptor for it.
492 */
493 PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
494 if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
495 return VERR_ADDRESS_TOO_BIG;
496
497 /*
498 * Create the IPRT memory object.
499 */
500 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
501 if (pMemNt)
502 {
503 pMemNt->Core.u.Phys.PhysBase = Phys;
504 pMemNt->Core.u.Phys.fAllocated = false;
505 pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
506 *ppMem = &pMemNt->Core;
507 return VINF_SUCCESS;
508 }
509 return VERR_NO_MEMORY;
510}
511
512
513/**
514 * Internal worker for locking down pages.
515 *
516 * @return IPRT status code.
517 *
518 * @param ppMem Where to store the memory object pointer.
519 * @param pv First page.
520 * @param cb Number of bytes.
521 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
522 * and RTMEM_PROT_WRITE.
523 * @param R0Process The process \a pv and \a cb refers to.
524 */
525static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
526{
527 /*
528 * Calc the number of MDLs we need and allocate the memory object structure.
529 */
530 size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
531 if (cb % MAX_LOCK_MEM_SIZE)
532 cMdls++;
533 if (cMdls >= UINT32_MAX)
534 return VERR_OUT_OF_RANGE;
535 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
536 RTR0MEMOBJTYPE_LOCK, pv, cb);
537 if (!pMemNt)
538 return VERR_NO_MEMORY;
539
540 /*
541 * Loop locking down the sub parts of the memory.
542 */
543 int rc = VINF_SUCCESS;
544 size_t cbTotal = 0;
545 uint8_t *pb = (uint8_t *)pv;
546 uint32_t iMdl;
547 for (iMdl = 0; iMdl < cMdls; iMdl++)
548 {
549 /*
550 * Calc the Mdl size and allocate it.
551 */
552 size_t cbCur = cb - cbTotal;
553 if (cbCur > MAX_LOCK_MEM_SIZE)
554 cbCur = MAX_LOCK_MEM_SIZE;
555 AssertMsg(cbCur, ("cbCur: 0!\n"));
556 PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
557 if (!pMdl)
558 {
559 rc = VERR_NO_MEMORY;
560 break;
561 }
562
563 /*
564 * Lock the pages.
565 */
566 __try
567 {
568 MmProbeAndLockPages(pMdl,
569 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
570 fAccess == RTMEM_PROT_READ
571 ? IoReadAccess
572 : fAccess == RTMEM_PROT_WRITE
573 ? IoWriteAccess
574 : IoModifyAccess);
575
576 pMemNt->apMdls[iMdl] = pMdl;
577 pMemNt->cMdls++;
578 }
579 __except(EXCEPTION_EXECUTE_HANDLER)
580 {
581 IoFreeMdl(pMdl);
582 rc = VERR_LOCK_FAILED;
583 break;
584 }
585
586 if (R0Process != NIL_RTR0PROCESS)
587 {
588 /* Make sure the user process can't change the allocation. */
589 pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb,
590 fAccess & RTMEM_PROT_WRITE
591 ? PAGE_READWRITE
592 : PAGE_READONLY);
593 if (!pMemNt->pvSecureMem)
594 {
595 rc = VERR_NO_MEMORY;
596 break;
597 }
598 }
599
600 /* next */
601 cbTotal += cbCur;
602 pb += cbCur;
603 }
604 if (RT_SUCCESS(rc))
605 {
606 Assert(pMemNt->cMdls == cMdls);
607 pMemNt->Core.u.Lock.R0Process = R0Process;
608 *ppMem = &pMemNt->Core;
609 return rc;
610 }
611
612 /*
613 * We failed, perform cleanups.
614 */
615 while (iMdl-- > 0)
616 {
617 MmUnlockPages(pMemNt->apMdls[iMdl]);
618 IoFreeMdl(pMemNt->apMdls[iMdl]);
619 pMemNt->apMdls[iMdl] = NULL;
620 }
621 if (pMemNt->pvSecureMem)
622 {
623 MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
624 pMemNt->pvSecureMem = NULL;
625 }
626
627 rtR0MemObjDelete(&pMemNt->Core);
628 return rc;
629}
630
631
632int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
633{
634 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
635 /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
636 return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
637}
638
639
640int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
641{
642 return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
643}
644
645
646int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
647{
648 /*
649 * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
650 */
651 return VERR_NOT_IMPLEMENTED;
652}
653
654
655int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
656{
657 /*
658 * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
659 */
660 return VERR_NOT_IMPLEMENTED;
661}
662
663
664/**
665 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
666 *
667 * @returns IPRT status code.
668 * @param ppMem Where to store the memory object for the mapping.
669 * @param pMemToMap The memory object to map.
670 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
671 * @param uAlignment The alignment requirement for the mapping.
672 * @param fProt The desired page protection for the mapping.
673 * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
674 * If not nil, it's the current process.
675 */
676static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
677 unsigned fProt, RTR0PROCESS R0Process)
678{
679 int rc = VERR_MAP_FAILED;
680
681 /*
682 * Check that the specified alignment is supported.
683 */
684 if (uAlignment > PAGE_SIZE)
685 return VERR_NOT_SUPPORTED;
686
687 /*
688 * There are two basic cases here, either we've got an MDL and can
689 * map it using MmMapLockedPages, or we've got a contiguous physical
690 * range (MMIO most likely) and can use MmMapIoSpace.
691 */
692 PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
693 if (pMemNtToMap->cMdls)
694 {
695 /* don't attempt map locked regions with more than one mdl. */
696 if (pMemNtToMap->cMdls != 1)
697 return VERR_NOT_SUPPORTED;
698
699#ifdef IPRT_TARGET_NT4
700 /* NT SP0 can't map to a specific address. */
701 if (pvFixed != (void *)-1)
702 return VERR_NOT_SUPPORTED;
703#endif
704
705 /* we can't map anything to the first page, sorry. */
706 if (pvFixed == 0)
707 return VERR_NOT_SUPPORTED;
708
709 /* only one system mapping for now - no time to figure out MDL restrictions right now. */
710 if ( pMemNtToMap->Core.uRel.Parent.cMappings
711 && R0Process == NIL_RTR0PROCESS)
712 return VERR_NOT_SUPPORTED;
713
714 __try
715 {
716 /** @todo uAlignment */
717 /** @todo How to set the protection on the pages? */
718#ifdef IPRT_TARGET_NT4
719 void *pv = MmMapLockedPages(pMemNtToMap->apMdls[0],
720 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
721#else
722 void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
723 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
724 MmCached,
725 pvFixed != (void *)-1 ? pvFixed : NULL,
726 FALSE /* no bug check on failure */,
727 NormalPagePriority);
728#endif
729 if (pv)
730 {
731 NOREF(fProt);
732
733 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
734 pMemNtToMap->Core.cb);
735 if (pMemNt)
736 {
737 pMemNt->Core.u.Mapping.R0Process = R0Process;
738 *ppMem = &pMemNt->Core;
739 return VINF_SUCCESS;
740 }
741
742 rc = VERR_NO_MEMORY;
743 MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
744 }
745 }
746 __except(EXCEPTION_EXECUTE_HANDLER)
747 {
748 NTSTATUS rcNt = GetExceptionCode();
749 Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
750
751 /* nothing */
752 rc = VERR_MAP_FAILED;
753 }
754
755 }
756 else
757 {
758 AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
759 && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
760
761 /* cannot map phys mem to user space (yet). */
762 if (R0Process != NIL_RTR0PROCESS)
763 return VERR_NOT_SUPPORTED;
764
765 /** @todo uAlignment */
766 /** @todo How to set the protection on the pages? */
767 PHYSICAL_ADDRESS Phys;
768 Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
769 void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
770 if (pv)
771 {
772 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
773 pMemNtToMap->Core.cb);
774 if (pMemNt)
775 {
776 pMemNt->Core.u.Mapping.R0Process = R0Process;
777 *ppMem = &pMemNt->Core;
778 return VINF_SUCCESS;
779 }
780
781 rc = VERR_NO_MEMORY;
782 MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
783 }
784 }
785
786 NOREF(uAlignment); NOREF(fProt);
787 return rc;
788}
789
790
791int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
792 unsigned fProt, size_t offSub, size_t cbSub)
793{
794 AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
795 return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
796}
797
798
799int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
800{
801 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
802 return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
803}
804
805
806int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
807{
808 NOREF(pMem);
809 NOREF(offSub);
810 NOREF(cbSub);
811 NOREF(fProt);
812 return VERR_NOT_SUPPORTED;
813}
814
815
816RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
817{
818 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
819
820 if (pMemNt->cMdls)
821 {
822 if (pMemNt->cMdls == 1)
823 {
824 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
825 return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
826 }
827
828 size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
829 size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
830 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
831 return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
832 }
833
834 switch (pMemNt->Core.enmType)
835 {
836 case RTR0MEMOBJTYPE_MAPPING:
837 return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
838
839 case RTR0MEMOBJTYPE_PHYS:
840 return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
841
842 case RTR0MEMOBJTYPE_PAGE:
843 case RTR0MEMOBJTYPE_PHYS_NC:
844 case RTR0MEMOBJTYPE_LOW:
845 case RTR0MEMOBJTYPE_CONT:
846 case RTR0MEMOBJTYPE_LOCK:
847 default:
848 AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
849 case RTR0MEMOBJTYPE_RES_VIRT:
850 return NIL_RTHCPHYS;
851 }
852}
853
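
A usage note, not part of the file above: drivers do not call the rtR0MemObjNative* workers in this file directly; they go through the generic RTR0MemObj API declared in iprt/memobj.h, which dispatches to them. The following is a hedged sketch of a typical allocate/query/free sequence; exampleAllocOnePage() is a hypothetical helper and the RTR0MemObj* signatures are assumed to match iprt/memobj.h.

#include <iprt/memobj.h>
#include <iprt/param.h>   /* PAGE_SIZE */
#include <iprt/err.h>     /* RT_SUCCESS */

/* Sketch: allocate one page of ring-0 memory (backed by
   rtR0MemObjNativeAllocPage() on NT), query its kernel mapping and
   physical address, then free it. Error handling is abbreviated. */
static int exampleAllocOnePage(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void    *pv   = RTR0MemObjAddress(hMemObj);                 /* kernel mapping */
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/);
        /* ... use pv and Phys ... */
        NOREF(pv); NOREF(Phys);
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}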