VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@70150

Last change on this file since 70150 was 70150, checked in by vboxsync, 7 years ago

iprt/r0drv/memobj: Let initterm resolve MmProtectMdlSystemAddress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 34.1 KB
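The change note above is about resolving MmProtectMdlSystemAddress at IPRT init time rather than importing it statically; the disabled rtR0MemObjNativeProtect() code near the end of this file already calls the API through a g_pfnrtMmProtectMdlSystemAddress pointer. A minimal sketch of such run-time resolution follows; the typedef and function names are illustrative assumptions (the actual initterm code may resolve the symbol differently), while MmGetSystemRoutineAddress and the MmProtectMdlSystemAddress signature are documented NT interfaces:

/* Sketch only: the names below are assumptions, not the actual IPRT initterm code. */
typedef NTSTATUS (NTAPI *PFNMMPROTECTMDLSYSTEMADDRESS)(PMDL pMdl, ULONG fNewProtect);
static PFNMMPROTECTMDLSYSTEMADDRESS g_pfnrtMmProtectMdlSystemAddress = NULL;

static void rtR0NtResolveMmProtectMdlSystemAddress(void)
{
    UNICODE_STRING RoutineName;
    RtlInitUnicodeString(&RoutineName, L"MmProtectMdlSystemAddress");
    /* Returns NULL when the kernel does not export the routine. */
    g_pfnrtMmProtectMdlSystemAddress =
        (PFNMMPROTECTMDLSYSTEMADDRESS)MmGetSystemRoutineAddress(&RoutineName);
}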
1/* $Id: memobj-r0drv-nt.cpp 70150 2017-12-15 14:10:17Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/memobj.h>
34#include <iprt/alloc.h>
35#include <iprt/assert.h>
36#include <iprt/log.h>
37#include <iprt/param.h>
38#include <iprt/string.h>
39#include <iprt/process.h>
40#include "internal/memobj.h"
41
42
43/*********************************************************************************************************************************
44* Defined Constants And Macros *
45*********************************************************************************************************************************/
46/** Maximum number of bytes we try to lock down in one go.
47 * The lock-down API is supposed to have a limit just below 256MB, but in practice
48 * it appears to be much lower. The values here have been determined experimentally.
49 */
50#ifdef RT_ARCH_X86
51# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
52#endif
53#ifdef RT_ARCH_AMD64
54# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
55#endif
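/* Consequently rtR0MemObjNtLock() below splits large lock requests into several MDLs,
   e.g. locking 100MB on AMD64 takes ceil(100/24) = 5 MDLs of at most 24MB each. */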
56
57
58/*********************************************************************************************************************************
59* Structures and Typedefs *
60*********************************************************************************************************************************/
61/**
62 * The NT version of the memory object structure.
63 */
64typedef struct RTR0MEMOBJNT
65{
66 /** The core structure. */
67 RTR0MEMOBJINTERNAL Core;
68#ifndef IPRT_TARGET_NT4
69 /** Set if the pages were allocated using MmAllocatePagesForMdl(). */
70 bool fAllocatedPagesForMdl;
71#endif
72 /** Pointer returned by MmSecureVirtualMemory */
73 PVOID pvSecureMem;
74 /** The number of PMDLs (memory descriptor lists) in the array. */
75 uint32_t cMdls;
76 /** Array of MDL pointers. (variable size) */
77 PMDL apMdls[1];
78} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
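/* Note: rtR0MemObjNtLock() allocates this structure with
   rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]), ...), so apMdls is
   effectively a variable length array with cMdls entries; the single-MDL
   allocation paths simply use sizeof(RTR0MEMOBJNT). */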
79
80
81
82DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
83{
84 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
85
86 /*
87 * Deal with it on a per type basis (just as a variation).
88 */
89 switch (pMemNt->Core.enmType)
90 {
91 case RTR0MEMOBJTYPE_LOW:
92#ifndef IPRT_TARGET_NT4
93 if (pMemNt->fAllocatedPagesForMdl)
94 {
95 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
96 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
97 pMemNt->Core.pv = NULL;
98 if (pMemNt->pvSecureMem)
99 {
100 MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
101 pMemNt->pvSecureMem = NULL;
102 }
103
104 MmFreePagesFromMdl(pMemNt->apMdls[0]);
105 ExFreePool(pMemNt->apMdls[0]);
106 pMemNt->apMdls[0] = NULL;
107 pMemNt->cMdls = 0;
108 break;
109 }
110#endif
111 AssertFailed();
112 break;
113
114 case RTR0MEMOBJTYPE_PAGE:
115 Assert(pMemNt->Core.pv);
116 ExFreePool(pMemNt->Core.pv);
117 pMemNt->Core.pv = NULL;
118
119 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
120 IoFreeMdl(pMemNt->apMdls[0]);
121 pMemNt->apMdls[0] = NULL;
122 pMemNt->cMdls = 0;
123 break;
124
125 case RTR0MEMOBJTYPE_CONT:
126 Assert(pMemNt->Core.pv);
127 MmFreeContiguousMemory(pMemNt->Core.pv);
128 pMemNt->Core.pv = NULL;
129
130 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
131 IoFreeMdl(pMemNt->apMdls[0]);
132 pMemNt->apMdls[0] = NULL;
133 pMemNt->cMdls = 0;
134 break;
135
136 case RTR0MEMOBJTYPE_PHYS:
137 /* Created by rtR0MemObjNativeEnterPhys? */
138 if (!pMemNt->Core.u.Phys.fAllocated)
139 {
140#ifndef IPRT_TARGET_NT4
141 Assert(!pMemNt->fAllocatedPagesForMdl);
142#endif
143 /* Nothing to do here. */
144 break;
145 }
146 RT_FALL_THRU();
147
148 case RTR0MEMOBJTYPE_PHYS_NC:
149#ifndef IPRT_TARGET_NT4
150 if (pMemNt->fAllocatedPagesForMdl)
151 {
152 MmFreePagesFromMdl(pMemNt->apMdls[0]);
153 ExFreePool(pMemNt->apMdls[0]);
154 pMemNt->apMdls[0] = NULL;
155 pMemNt->cMdls = 0;
156 break;
157 }
158#endif
159 AssertFailed();
160 break;
161
162 case RTR0MEMOBJTYPE_LOCK:
163 if (pMemNt->pvSecureMem)
164 {
165 MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
166 pMemNt->pvSecureMem = NULL;
167 }
168 for (uint32_t i = 0; i < pMemNt->cMdls; i++)
169 {
170 MmUnlockPages(pMemNt->apMdls[i]);
171 IoFreeMdl(pMemNt->apMdls[i]);
172 pMemNt->apMdls[i] = NULL;
173 }
174 break;
175
176 case RTR0MEMOBJTYPE_RES_VIRT:
177/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
178 {
179 }
180 else
181 {
182 }*/
183 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
184 return VERR_INTERNAL_ERROR;
185 break;
186
187 case RTR0MEMOBJTYPE_MAPPING:
188 {
189 Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
190 PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
191 Assert(pMemNtParent);
192 if (pMemNtParent->cMdls)
193 {
194 Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
195 Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
196 || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
197 MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
198 }
199 else
200 {
201 Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
202 && !pMemNtParent->Core.u.Phys.fAllocated);
203 Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
204 MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
205 }
206 pMemNt->Core.pv = NULL;
207 break;
208 }
209
210 default:
211 AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
212 return VERR_INTERNAL_ERROR;
213 }
214
215 return VINF_SUCCESS;
216}
217
218
219DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
220{
221 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
222 RT_NOREF1(fExecutable);
223
224 /*
225 * Try to allocate the memory and create an MDL for it so
226 * we can query the physical addresses and do mappings later
227 * without running into out-of-memory conditions and similar problems.
228 */
229 int rc = VERR_NO_PAGE_MEMORY;
230 void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
231 if (pv)
232 {
233 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
234 if (pMdl)
235 {
236 MmBuildMdlForNonPagedPool(pMdl);
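    /* fExecutable is ignored above; on AMD64 the system mapping is unconditionally
       made execute+read+write below, presumably because 64-bit Windows may map the
       nonpaged pool non-executable by default. */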
237#ifdef RT_ARCH_AMD64
238 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
239#endif
240
241 /*
242 * Create the IPRT memory object.
243 */
244 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
245 if (pMemNt)
246 {
247 pMemNt->cMdls = 1;
248 pMemNt->apMdls[0] = pMdl;
249 *ppMem = &pMemNt->Core;
250 return VINF_SUCCESS;
251 }
252
253 rc = VERR_NO_MEMORY;
254 IoFreeMdl(pMdl);
255 }
256 ExFreePool(pv);
257 }
258 return rc;
259}
260
261
262DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
263{
264 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
265
266 /*
267 * Try to see if we get lucky first...
268 * (We could probably just assume we're lucky on NT4.)
269 */
270 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
271 if (RT_SUCCESS(rc))
272 {
273 size_t iPage = cb >> PAGE_SHIFT;
274 while (iPage-- > 0)
275 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
276 {
277 rc = VERR_NO_LOW_MEMORY;
278 break;
279 }
280 if (RT_SUCCESS(rc))
281 return rc;
282
283 /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
284 RTR0MemObjFree(*ppMem, false);
285 *ppMem = NULL;
286 }
287
288#ifndef IPRT_TARGET_NT4
289 /*
290 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
291 */
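    /* Note: MmAllocatePagesForMdl may return an MDL describing fewer bytes than
       requested, hence the MmGetMdlByteCount() check below. */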
292 PHYSICAL_ADDRESS Zero;
293 Zero.QuadPart = 0;
294 PHYSICAL_ADDRESS HighAddr;
295 HighAddr.QuadPart = _4G - 1;
296 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
297 if (pMdl)
298 {
299 if (MmGetMdlByteCount(pMdl) >= cb)
300 {
301 __try
302 {
303 void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
304 FALSE /* no bug check on failure */, NormalPagePriority);
305 if (pv)
306 {
307 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
308 if (pMemNt)
309 {
310 pMemNt->fAllocatedPagesForMdl = true;
311 pMemNt->cMdls = 1;
312 pMemNt->apMdls[0] = pMdl;
313 *ppMem = &pMemNt->Core;
314 return VINF_SUCCESS;
315 }
316 MmUnmapLockedPages(pv, pMdl);
317 }
318 }
319 __except(EXCEPTION_EXECUTE_HANDLER)
320 {
321# ifdef LOG_ENABLED
322 NTSTATUS rcNt = GetExceptionCode();
323 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
324# endif
325 /* nothing */
326 }
327 }
328 MmFreePagesFromMdl(pMdl);
329 ExFreePool(pMdl);
330 }
331#endif /* !IPRT_TARGET_NT4 */
332
333 /*
334 * Fall back on contiguous memory...
335 */
336 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
337}
338
339
340/**
341 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
342 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
343 * to what rtR0MemObjNativeAllocCont() does.
344 *
345 * @returns IPRT status code.
346 * @param ppMem Where to store the pointer to the ring-0 memory object.
347 * @param cb The size.
348 * @param fExecutable Whether the mapping should be executable or not.
349 * @param PhysHighest The highest physical address for the pages in the allocation.
350 * @param uAlignment The alignment of the physical memory to allocate.
351 * Supported values are PAGE_SIZE, _2M, _4M and _1G.
352 */
353static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
354 size_t uAlignment)
355{
356 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
357 RT_NOREF1(fExecutable);
358#ifdef IPRT_TARGET_NT4
359 if (uAlignment != PAGE_SIZE)
360 return VERR_NOT_SUPPORTED;
361#endif
362
363 /*
364 * Allocate the memory and create an MDL for it.
365 */
366 PHYSICAL_ADDRESS PhysAddrHighest;
367 PhysAddrHighest.QuadPart = PhysHighest;
368#ifndef IPRT_TARGET_NT4
369 PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
370 PhysAddrLowest.QuadPart = 0;
371 PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
372 void *pv = MmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
373#else
374 void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
375#endif
376 if (!pv)
377 return VERR_NO_MEMORY;
378
379 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
380 if (pMdl)
381 {
382 MmBuildMdlForNonPagedPool(pMdl);
383#ifdef RT_ARCH_AMD64
384 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
385#endif
386
387 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
388 if (pMemNt)
389 {
390 pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
391 pMemNt->cMdls = 1;
392 pMemNt->apMdls[0] = pMdl;
393 *ppMem = &pMemNt->Core;
394 return VINF_SUCCESS;
395 }
396
397 IoFreeMdl(pMdl);
398 }
399 MmFreeContiguousMemory(pv);
400 return VERR_NO_MEMORY;
401}
402
403
404DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
405{
406 return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
407}
408
409
410DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
411{
412#ifndef IPRT_TARGET_NT4
413 /*
414 * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
415 *
416 * This is preferable to using MmAllocateContiguousMemory because there are
417 * a few situations where the memory shouldn't be mapped, like for instance
418 * VT-x control memory. Since these are rather small allocations (one or
419 * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
420 * request.
421 *
422 * If the allocation is big, the chances are *probably* not very good. The
423 * current limit is kind of random...
424 */
425 if ( cb < _128K
426 && uAlignment == PAGE_SIZE)
427
428 {
429 PHYSICAL_ADDRESS Zero;
430 Zero.QuadPart = 0;
431 PHYSICAL_ADDRESS HighAddr;
432 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
433 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
434 if (pMdl)
435 {
436 if (MmGetMdlByteCount(pMdl) >= cb)
437 {
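            /* MmAllocatePagesForMdl makes no contiguity guarantee, so walk the PFN
               array and check whether the pages we got happen to be physically
               contiguous. */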
438 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
439 PFN_NUMBER Pfn = paPfns[0] + 1;
440 const size_t cPages = cb >> PAGE_SHIFT;
441 size_t iPage;
442 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
443 if (paPfns[iPage] != Pfn)
444 break;
445 if (iPage >= cPages)
446 {
447 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
448 if (pMemNt)
449 {
450 pMemNt->Core.u.Phys.fAllocated = true;
451 pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
452 pMemNt->fAllocatedPagesForMdl = true;
453 pMemNt->cMdls = 1;
454 pMemNt->apMdls[0] = pMdl;
455 *ppMem = &pMemNt->Core;
456 return VINF_SUCCESS;
457 }
458 }
459 }
460 MmFreePagesFromMdl(pMdl);
461 ExFreePool(pMdl);
462 }
463 }
464#endif /* !IPRT_TARGET_NT4 */
465
466 return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
467}
468
469
470DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
471{
472#ifndef IPRT_TARGET_NT4
473 PHYSICAL_ADDRESS Zero;
474 Zero.QuadPart = 0;
475 PHYSICAL_ADDRESS HighAddr;
476 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
477 PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
478 if (pMdl)
479 {
480 if (MmGetMdlByteCount(pMdl) >= cb)
481 {
482 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
483 if (pMemNt)
484 {
485 pMemNt->fAllocatedPagesForMdl = true;
486 pMemNt->cMdls = 1;
487 pMemNt->apMdls[0] = pMdl;
488 *ppMem = &pMemNt->Core;
489 return VINF_SUCCESS;
490 }
491 }
492 MmFreePagesFromMdl(pMdl);
493 ExFreePool(pMdl);
494 }
495 return VERR_NO_MEMORY;
496#else /* IPRT_TARGET_NT4 */
497 RT_NOREF(ppMem, cb, PhysHighest);
498 return VERR_NOT_SUPPORTED;
499#endif /* IPRT_TARGET_NT4 */
500}
501
502
503DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
504{
505 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);
506
507 /*
508 * Validate the address range and create a descriptor for it.
509 */
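    /* A physical address whose page frame number does not fit in a PFN_NUMBER cannot
       be described here, so reject it up front (VERR_ADDRESS_TOO_BIG). */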
510 PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
511 if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
512 return VERR_ADDRESS_TOO_BIG;
513
514 /*
515 * Create the IPRT memory object.
516 */
517 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
518 if (pMemNt)
519 {
520 pMemNt->Core.u.Phys.PhysBase = Phys;
521 pMemNt->Core.u.Phys.fAllocated = false;
522 pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
523 *ppMem = &pMemNt->Core;
524 return VINF_SUCCESS;
525 }
526 return VERR_NO_MEMORY;
527}
528
529
530/**
531 * Internal worker for locking down pages.
532 *
533 * @return IPRT status code.
534 *
535 * @param ppMem Where to store the memory object pointer.
536 * @param pv First page.
537 * @param cb Number of bytes.
538 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
539 * and RTMEM_PROT_WRITE.
540 * @param R0Process The process \a pv and \a cb refers to.
541 */
542static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
543{
544 /*
545 * Calc the number of MDLs we need and allocate the memory object structure.
546 */
547 size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
548 if (cb % MAX_LOCK_MEM_SIZE)
549 cMdls++;
550 if (cMdls >= UINT32_MAX)
551 return VERR_OUT_OF_RANGE;
552 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
553 RTR0MEMOBJTYPE_LOCK, pv, cb);
554 if (!pMemNt)
555 return VERR_NO_MEMORY;
556
557 /*
558 * Loop locking down the sub parts of the memory.
559 */
560 int rc = VINF_SUCCESS;
561 size_t cbTotal = 0;
562 uint8_t *pb = (uint8_t *)pv;
563 uint32_t iMdl;
564 for (iMdl = 0; iMdl < cMdls; iMdl++)
565 {
566 /*
567 * Calc the Mdl size and allocate it.
568 */
569 size_t cbCur = cb - cbTotal;
570 if (cbCur > MAX_LOCK_MEM_SIZE)
571 cbCur = MAX_LOCK_MEM_SIZE;
572 AssertMsg(cbCur, ("cbCur: 0!\n"));
573 PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
574 if (!pMdl)
575 {
576 rc = VERR_NO_MEMORY;
577 break;
578 }
579
580 /*
581 * Lock the pages.
582 */
583 __try
584 {
585 MmProbeAndLockPages(pMdl,
586 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
587 fAccess == RTMEM_PROT_READ
588 ? IoReadAccess
589 : fAccess == RTMEM_PROT_WRITE
590 ? IoWriteAccess
591 : IoModifyAccess);
592
593 pMemNt->apMdls[iMdl] = pMdl;
594 pMemNt->cMdls++;
595 }
596 __except(EXCEPTION_EXECUTE_HANDLER)
597 {
598 IoFreeMdl(pMdl);
599 rc = VERR_LOCK_FAILED;
600 break;
601 }
602
603 if (R0Process != NIL_RTR0PROCESS)
604 {
605 /* Make sure the user process can't change the allocation. */
606 pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb,
607 fAccess & RTMEM_PROT_WRITE
608 ? PAGE_READWRITE
609 : PAGE_READONLY);
610 if (!pMemNt->pvSecureMem)
611 {
612 rc = VERR_NO_MEMORY;
613 break;
614 }
615 }
616
617 /* next */
618 cbTotal += cbCur;
619 pb += cbCur;
620 }
621 if (RT_SUCCESS(rc))
622 {
623 Assert(pMemNt->cMdls == cMdls);
624 pMemNt->Core.u.Lock.R0Process = R0Process;
625 *ppMem = &pMemNt->Core;
626 return rc;
627 }
628
629 /*
630 * We failed, perform cleanups.
631 */
632 while (iMdl-- > 0)
633 {
634 MmUnlockPages(pMemNt->apMdls[iMdl]);
635 IoFreeMdl(pMemNt->apMdls[iMdl]);
636 pMemNt->apMdls[iMdl] = NULL;
637 }
638 if (pMemNt->pvSecureMem)
639 {
640 MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
641 pMemNt->pvSecureMem = NULL;
642 }
643
644 rtR0MemObjDelete(&pMemNt->Core);
645 return rc;
646}
647
648
649DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
650 RTR0PROCESS R0Process)
651{
652 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
653 /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
654 return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
655}
656
657
658DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
659{
660 return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
661}
662
663
664DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
665{
666 /*
667 * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
668 */
669 RT_NOREF4(ppMem, pvFixed, cb, uAlignment);
670 return VERR_NOT_SUPPORTED;
671}
672
673
674DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
675 RTR0PROCESS R0Process)
676{
677 /*
678 * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
679 */
680 RT_NOREF5(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
681 return VERR_NOT_SUPPORTED;
682}
683
684
685/**
686 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
687 *
688 * @returns IPRT status code.
689 * @param ppMem Where to store the memory object for the mapping.
690 * @param pMemToMap The memory object to map.
691 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
692 * @param uAlignment The alignment requirement for the mapping.
693 * @param fProt The desired page protection for the mapping.
694 * @param R0Process If NIL_RTR0PROCESS, map into system (kernel) memory.
695 * If not nil, it's the current process.
696 */
697static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
698 unsigned fProt, RTR0PROCESS R0Process)
699{
700 int rc = VERR_MAP_FAILED;
701
702 /*
703 * Check that the specified alignment is supported.
704 */
705 if (uAlignment > PAGE_SIZE)
706 return VERR_NOT_SUPPORTED;
707
708 /*
709 * There are two basic cases here, either we've got an MDL and can
710 * map it using MmMapLockedPages, or we've got a contiguous physical
711 * range (MMIO most likely) and can use MmMapIoSpace.
712 */
713 PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
714 if (pMemNtToMap->cMdls)
715 {
716 /* don't attempt to map locked regions with more than one MDL. */
717 if (pMemNtToMap->cMdls != 1)
718 return VERR_NOT_SUPPORTED;
719
720#ifdef IPRT_TARGET_NT4
721 /* NT SP0 can't map to a specific address. */
722 if (pvFixed != (void *)-1)
723 return VERR_NOT_SUPPORTED;
724#endif
725
726 /* we can't map anything to the first page, sorry. */
727 if (pvFixed == 0)
728 return VERR_NOT_SUPPORTED;
729
730 /* only one system mapping for now - no time to figure out MDL restrictions right now. */
731 if ( pMemNtToMap->Core.uRel.Parent.cMappings
732 && R0Process == NIL_RTR0PROCESS)
733 return VERR_NOT_SUPPORTED;
734
735 __try
736 {
737 /** @todo uAlignment */
738 /** @todo How to set the protection on the pages? */
739#ifdef IPRT_TARGET_NT4
740 void *pv = MmMapLockedPages(pMemNtToMap->apMdls[0],
741 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
742#else
743 void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
744 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
745 MmCached,
746 pvFixed != (void *)-1 ? pvFixed : NULL,
747 FALSE /* no bug check on failure */,
748 NormalPagePriority);
749#endif
750 if (pv)
751 {
752 NOREF(fProt);
753
754 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
755 pMemNtToMap->Core.cb);
756 if (pMemNt)
757 {
758 pMemNt->Core.u.Mapping.R0Process = R0Process;
759 *ppMem = &pMemNt->Core;
760 return VINF_SUCCESS;
761 }
762
763 rc = VERR_NO_MEMORY;
764 MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
765 }
766 }
767 __except(EXCEPTION_EXECUTE_HANDLER)
768 {
769#ifdef LOG_ENABLED
770 NTSTATUS rcNt = GetExceptionCode();
771 Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
772#endif
773
774 /* nothing */
775 rc = VERR_MAP_FAILED;
776 }
777
778 }
779 else
780 {
781 AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
782 && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
783
784 /* cannot map phys mem to user space (yet). */
785 if (R0Process != NIL_RTR0PROCESS)
786 return VERR_NOT_SUPPORTED;
787
788 /** @todo uAlignment */
789 /** @todo How to set the protection on the pages? */
790 PHYSICAL_ADDRESS Phys;
791 Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
792 void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
793 pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
794 if (pv)
795 {
796 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
797 pMemNtToMap->Core.cb);
798 if (pMemNt)
799 {
800 pMemNt->Core.u.Mapping.R0Process = R0Process;
801 *ppMem = &pMemNt->Core;
802 return VINF_SUCCESS;
803 }
804
805 rc = VERR_NO_MEMORY;
806 MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
807 }
808 }
809
810 NOREF(uAlignment); NOREF(fProt);
811 return rc;
812}
813
814
815DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
816 unsigned fProt, size_t offSub, size_t cbSub)
817{
818 AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
819 return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
820}
821
822
823DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
824{
825 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
826 return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
827}
828
829
830DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
831{
832#if 0
833 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
834#endif
835
836 /*
837 * There seem to be some issues with the MmProtectMdlSystemAddress API, so this
838 * code is currently disabled until it has been tested with the driver verifier.
839 */
840#if 0
841 /*
842 * The API we've got requires a kernel mapping.
843 */
844 if ( pMemNt->cMdls
845 && g_pfnrtMmProtectMdlSystemAddress
846 && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
847 && pMemNt->Core.pv != NULL
848 && ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
849 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
850 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
851 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
852 && pMemNt->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
853 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
854 && pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS) ) )
855 {
856 /* Convert the protection. */
857 LOCK_OPERATION enmLockOp;
858 ULONG fAccess;
859 switch (fProt)
860 {
861 case RTMEM_PROT_NONE:
862 fAccess = PAGE_NOACCESS;
863 enmLockOp = IoReadAccess;
864 break;
865 case RTMEM_PROT_READ:
866 fAccess = PAGE_READONLY;
867 enmLockOp = IoReadAccess;
868 break;
869 case RTMEM_PROT_WRITE:
870 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
871 fAccess = PAGE_READWRITE;
872 enmLockOp = IoModifyAccess;
873 break;
874 case RTMEM_PROT_EXEC:
875 fAccess = PAGE_EXECUTE;
876 enmLockOp = IoReadAccess;
877 break;
878 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
879 fAccess = PAGE_EXECUTE_READ;
880 enmLockOp = IoReadAccess;
881 break;
882 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
883 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
884 fAccess = PAGE_EXECUTE_READWRITE;
885 enmLockOp = IoModifyAccess;
886 break;
887 default:
888 AssertFailedReturn(VERR_INVALID_FLAGS);
889 }
890
891 NTSTATUS rcNt = STATUS_SUCCESS;
892# if 0 /** @todo test this against the verifier. */
893 if (offSub == 0 && pMemNt->Core.cb == cbSub)
894 {
895 uint32_t iMdl = pMemNt->cMdls;
896 while (iMdl-- > 0)
897 {
898 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[iMdl], fAccess);
899 if (!NT_SUCCESS(rcNt))
900 break;
901 }
902 }
903 else
904# endif
905 {
906 /*
907 * We ASSUME the following here:
908 * - MmProtectMdlSystemAddress can deal with nonpaged pool memory
909 * - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
910 * - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
911 * exact same ranges prior to freeing them.
912 *
913 * So, we lock the pages temporarily, call the API and unlock them.
914 */
915 uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
916 while (cbSub > 0 && NT_SUCCESS(rcNt))
917 {
918 size_t cbCur = cbSub;
919 if (cbCur > MAX_LOCK_MEM_SIZE)
920 cbCur = MAX_LOCK_MEM_SIZE;
921 PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
922 if (pMdl)
923 {
924 __try
925 {
926 MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
927 }
928 __except(EXCEPTION_EXECUTE_HANDLER)
929 {
930 rcNt = GetExceptionCode();
931 }
932 if (NT_SUCCESS(rcNt))
933 {
934 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
935 MmUnlockPages(pMdl);
936 }
937 IoFreeMdl(pMdl);
938 }
939 else
940 rcNt = STATUS_NO_MEMORY;
941 pbCur += cbCur;
942 cbSub -= cbCur;
943 }
944 }
945
946 if (NT_SUCCESS(rcNt))
947 return VINF_SUCCESS;
948 return RTErrConvertFromNtStatus(rcNt);
949 }
950#else
951 RT_NOREF4(pMem, offSub, cbSub, fProt);
952#endif
953
954 return VERR_NOT_SUPPORTED;
955}
956
957
958DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
959{
960 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
961
962 if (pMemNt->cMdls)
963 {
964 if (pMemNt->cMdls == 1)
965 {
966 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
967 return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
968 }
969
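        /* Locked memory was split into MAX_LOCK_MEM_SIZE chunks by rtR0MemObjNtLock(),
           one MDL per chunk; e.g. with 24MB MDLs and 4KB pages, page 7000 is entry
           7000 % 6144 = 856 of MDL 7000 / 6144 = 1. */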
970 size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
971 size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
972 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
973 return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
974 }
975
976 switch (pMemNt->Core.enmType)
977 {
978 case RTR0MEMOBJTYPE_MAPPING:
979 return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
980
981 case RTR0MEMOBJTYPE_PHYS:
982 return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
983
984 case RTR0MEMOBJTYPE_PAGE:
985 case RTR0MEMOBJTYPE_PHYS_NC:
986 case RTR0MEMOBJTYPE_LOW:
987 case RTR0MEMOBJTYPE_CONT:
988 case RTR0MEMOBJTYPE_LOCK:
989 default:
990 AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
991 case RTR0MEMOBJTYPE_RES_VIRT:
992 return NIL_RTHCPHYS;
993 }
994}
995
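For context, these natives back the generic IPRT ring-0 memory object API declared in iprt/memobj.h. A short usage sketch from a hypothetical ring-0 caller follows; the function name and error handling are illustrative only and not taken from this file:

#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/err.h>

/* Hypothetical example: allocate one page, query its physical address, then free it. */
static int rtExampleAllocOnePage(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void    *pv   = RTR0MemObjAddress(hMemObj);           /* ring-0 mapping */
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/);
        /* ... use pv and Phys ... */
        NOREF(pv); NOREF(Phys);
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}

On NT, RTR0MemObjAllocPage() ends up in rtR0MemObjNativeAllocPage() above, and RTR0MemObjGetPagePhysAddr() in rtR0MemObjNativeGetPagePhysAddr().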