VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@74100

Last change on this file since 74100 was 73097, checked in by vboxsync, 7 years ago

*: Made RT_UOFFSETOF, RT_OFFSETOF, RT_UOFFSETOF_ADD and RT_OFFSETOF_ADD work like builtin_offsetof() and require compile time resolvable requests, adding RT_UOFFSETOF_DYN for the dynamic questions that can only be answered at runtime.
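A minimal sketch of the distinction, using the RTR0MEMOBJNT structure defined further down in this file (the local variables are illustrative only, not from the source):

    /* Index known at compile time: RT_UOFFSETOF suffices and now resolves like the compiler's built-in offsetof. */
    size_t const offFirstMdl = RT_UOFFSETOF(RTR0MEMOBJNT, apMdls[0]);

    /* Index known only at runtime: this now requires RT_UOFFSETOF_DYN. */
    uint32_t cMdls = 4; /* stand-in for a value computed at runtime */
    size_t   cbObj = RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]);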

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 34.9 KB
1/* $Id: memobj-r0drv-nt.cpp 73097 2018-07-12 21:06:33Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/memobj.h>
34#include <iprt/alloc.h>
35#include <iprt/assert.h>
36#include <iprt/log.h>
37#include <iprt/param.h>
38#include <iprt/string.h>
39#include <iprt/process.h>
40#include "internal/memobj.h"
41#include "internal-r0drv-nt.h"
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** Maximum number of bytes we try to lock down in one go.
48 * Locking is supposed to be limited to just under 256MB in one go, but in practice
49 * the limit appears to be much lower. The values here were determined experimentally.
50 */
51#ifdef RT_ARCH_X86
52# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
53#endif
54#ifdef RT_ARCH_AMD64
55# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
56#endif
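57/* Example: rtR0MemObjNtLock() below splits a lock request into ceil(cb / MAX_LOCK_MEM_SIZE)
 * MDL-sized chunks, so on AMD64 locking 100 MB, for instance, takes four 24 MB chunks plus
 * one 4 MB chunk, i.e. five MDLs. */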
57
58
59/*********************************************************************************************************************************
60* Structures and Typedefs *
61*********************************************************************************************************************************/
62/**
63 * The NT version of the memory object structure.
64 */
65typedef struct RTR0MEMOBJNT
66{
67 /** The core structure. */
68 RTR0MEMOBJINTERNAL Core;
69 /** Set if the pages were allocated with MmAllocatePagesForMdl(). */
70 bool fAllocatedPagesForMdl;
71 /** Pointer returned by MmSecureVirtualMemory(). */
72 PVOID pvSecureMem;
73 /** The number of PMDLs (memory descriptor lists) in the array. */
74 uint32_t cMdls;
75 /** Array of MDL pointers. (variable size) */
76 PMDL apMdls[1];
77} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
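/* Note: the object is over-allocated so that apMdls[] can hold more than one entry; see
 * rtR0MemObjNtLock() below, which sizes it with RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]). */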
78
79
80
81DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
82{
83 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
84
85 /*
86 * Deal with it on a per type basis (just as a variation).
87 */
88 switch (pMemNt->Core.enmType)
89 {
90 case RTR0MEMOBJTYPE_LOW:
91 if (pMemNt->fAllocatedPagesForMdl)
92 {
93 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
94 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
95 pMemNt->Core.pv = NULL;
96 if (pMemNt->pvSecureMem)
97 {
98 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
99 pMemNt->pvSecureMem = NULL;
100 }
101
102 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
103 ExFreePool(pMemNt->apMdls[0]);
104 pMemNt->apMdls[0] = NULL;
105 pMemNt->cMdls = 0;
106 break;
107 }
108 AssertFailed();
109 break;
110
111 case RTR0MEMOBJTYPE_PAGE:
112 Assert(pMemNt->Core.pv);
113 if (g_pfnrtExFreePoolWithTag)
114 g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
115 else
116 ExFreePool(pMemNt->Core.pv);
117 pMemNt->Core.pv = NULL;
118
119 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
120 IoFreeMdl(pMemNt->apMdls[0]);
121 pMemNt->apMdls[0] = NULL;
122 pMemNt->cMdls = 0;
123 break;
124
125 case RTR0MEMOBJTYPE_CONT:
126 Assert(pMemNt->Core.pv);
127 MmFreeContiguousMemory(pMemNt->Core.pv);
128 pMemNt->Core.pv = NULL;
129
130 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
131 IoFreeMdl(pMemNt->apMdls[0]);
132 pMemNt->apMdls[0] = NULL;
133 pMemNt->cMdls = 0;
134 break;
135
136 case RTR0MEMOBJTYPE_PHYS:
137 /* rtR0MemObjNativeEnterPhys? */
138 if (!pMemNt->Core.u.Phys.fAllocated)
139 {
140 Assert(!pMemNt->fAllocatedPagesForMdl);
141 /* Nothing to do here. */
142 break;
143 }
144 RT_FALL_THRU();
145
146 case RTR0MEMOBJTYPE_PHYS_NC:
147 if (pMemNt->fAllocatedPagesForMdl)
148 {
149 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
150 ExFreePool(pMemNt->apMdls[0]);
151 pMemNt->apMdls[0] = NULL;
152 pMemNt->cMdls = 0;
153 break;
154 }
155 AssertFailed();
156 break;
157
158 case RTR0MEMOBJTYPE_LOCK:
159 if (pMemNt->pvSecureMem)
160 {
161 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
162 pMemNt->pvSecureMem = NULL;
163 }
164 for (uint32_t i = 0; i < pMemNt->cMdls; i++)
165 {
166 MmUnlockPages(pMemNt->apMdls[i]);
167 IoFreeMdl(pMemNt->apMdls[i]);
168 pMemNt->apMdls[i] = NULL;
169 }
170 break;
171
172 case RTR0MEMOBJTYPE_RES_VIRT:
173/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
174 {
175 }
176 else
177 {
178 }*/
179 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
180 return VERR_INTERNAL_ERROR;
181 break;
182
183 case RTR0MEMOBJTYPE_MAPPING:
184 {
185 Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
186 PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
187 Assert(pMemNtParent);
188 if (pMemNtParent->cMdls)
189 {
190 Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
191 Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
192 || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
193 MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
194 }
195 else
196 {
197 Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
198 && !pMemNtParent->Core.u.Phys.fAllocated);
199 Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
200 MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
201 }
202 pMemNt->Core.pv = NULL;
203 break;
204 }
205
206 default:
207 AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
208 return VERR_INTERNAL_ERROR;
209 }
210
211 return VINF_SUCCESS;
212}
213
214
215DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
216{
217 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
218 RT_NOREF1(fExecutable);
219
220 /*
221 * Try to allocate the memory and create an MDL for it so
222 * we can query the physical addresses and do mappings later
223 * without running into out-of-memory conditions and similar problems.
224 */
225 int rc = VERR_NO_PAGE_MEMORY;
226 void *pv;
227 if (g_pfnrtExAllocatePoolWithTag)
228 pv = g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
229 else
230 pv = ExAllocatePool(NonPagedPool, cb);
231 if (pv)
232 {
233 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
234 if (pMdl)
235 {
236 MmBuildMdlForNonPagedPool(pMdl);
237#ifdef RT_ARCH_AMD64
238 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
239#endif
240
241 /*
242 * Create the IPRT memory object.
243 */
244 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
245 if (pMemNt)
246 {
247 pMemNt->cMdls = 1;
248 pMemNt->apMdls[0] = pMdl;
249 *ppMem = &pMemNt->Core;
250 return VINF_SUCCESS;
251 }
252
253 rc = VERR_NO_MEMORY;
254 IoFreeMdl(pMdl);
255 }
256 ExFreePool(pv);
257 }
258 return rc;
259}
260
261
262DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
263{
264 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
265
266 /*
267 * Try to see if we get lucky first...
268 * (We could probably just assume we're lucky on NT4.)
269 */
270 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
271 if (RT_SUCCESS(rc))
272 {
273 size_t iPage = cb >> PAGE_SHIFT;
274 while (iPage-- > 0)
275 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
276 {
277 rc = VERR_NO_LOW_MEMORY;
278 break;
279 }
280 if (RT_SUCCESS(rc))
281 return rc;
282
283 /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
284 RTR0MemObjFree(*ppMem, false);
285 *ppMem = NULL;
286 }
287
288 /*
289 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
290 */
291 if ( g_pfnrtMmAllocatePagesForMdl
292 && g_pfnrtMmFreePagesFromMdl
293 && g_pfnrtMmMapLockedPagesSpecifyCache)
294 {
295 PHYSICAL_ADDRESS Zero;
296 Zero.QuadPart = 0;
297 PHYSICAL_ADDRESS HighAddr;
298 HighAddr.QuadPart = _4G - 1;
299 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
300 if (pMdl)
301 {
302 if (MmGetMdlByteCount(pMdl) >= cb)
303 {
304 __try
305 {
306 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
307 FALSE /* no bug check on failure */, NormalPagePriority);
308 if (pv)
309 {
310 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
311 if (pMemNt)
312 {
313 pMemNt->fAllocatedPagesForMdl = true;
314 pMemNt->cMdls = 1;
315 pMemNt->apMdls[0] = pMdl;
316 *ppMem = &pMemNt->Core;
317 return VINF_SUCCESS;
318 }
319 MmUnmapLockedPages(pv, pMdl);
320 }
321 }
322 __except(EXCEPTION_EXECUTE_HANDLER)
323 {
324# ifdef LOG_ENABLED
325 NTSTATUS rcNt = GetExceptionCode();
326 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
327# endif
328 /* nothing */
329 }
330 }
331 g_pfnrtMmFreePagesFromMdl(pMdl);
332 ExFreePool(pMdl);
333 }
334 }
335
336 /*
337 * Fall back on contiguous memory...
338 */
339 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
340}
341
342
343/**
344 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
345 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
346 * to what rtR0MemObjNativeAllocCont() does.
347 *
348 * @returns IPRT status code.
349 * @param ppMem Where to store the pointer to the ring-0 memory object.
350 * @param cb The size.
351 * @param fExecutable Whether the mapping should be executable or not.
352 * @param PhysHighest The highest physical address for the pages in the allocation.
353 * @param uAlignment The alignment of the physical memory to allocate.
354 * Supported values are PAGE_SIZE, _2M, _4M and _1G.
355 */
356static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
357 size_t uAlignment)
358{
359 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
360 RT_NOREF1(fExecutable);
361
362 /*
363 * Allocate the memory and create an MDL for it.
364 */
365 PHYSICAL_ADDRESS PhysAddrHighest;
366 PhysAddrHighest.QuadPart = PhysHighest;
367 void *pv;
368 if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
369 {
370 PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
371 PhysAddrLowest.QuadPart = 0;
372 PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
373 pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
374 }
375 else if (uAlignment == PAGE_SIZE)
376 pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
377 else
378 return VERR_NOT_SUPPORTED;
379 if (!pv)
380 return VERR_NO_MEMORY;
381
382 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
383 if (pMdl)
384 {
385 MmBuildMdlForNonPagedPool(pMdl);
386#ifdef RT_ARCH_AMD64
387 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
388#endif
389
390 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
391 if (pMemNt)
392 {
393 pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
394 pMemNt->cMdls = 1;
395 pMemNt->apMdls[0] = pMdl;
396 *ppMem = &pMemNt->Core;
397 return VINF_SUCCESS;
398 }
399
400 IoFreeMdl(pMdl);
401 }
402 MmFreeContiguousMemory(pv);
403 return VERR_NO_MEMORY;
404}
405
406
407DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
408{
409 return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
410}
411
412
413DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
414{
415 /*
416 * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
417 *
418 * This is preferable to using MmAllocateContiguousMemory because there are
419 * a few situations where the memory shouldn't be mapped, like for instance
420 * VT-x control memory. Since these are rather small allocations (one or
421 * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
422 * request.
423 *
424 * If the allocation is big, the chances are *probably* not very good. The
425 * current limit is kind of random...
426 */
427 if ( cb < _128K
428 && uAlignment == PAGE_SIZE
429 && g_pfnrtMmAllocatePagesForMdl
430 && g_pfnrtMmFreePagesFromMdl)
431 {
432 PHYSICAL_ADDRESS Zero;
433 Zero.QuadPart = 0;
434 PHYSICAL_ADDRESS HighAddr;
435 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
436 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
437 if (pMdl)
438 {
439 if (MmGetMdlByteCount(pMdl) >= cb)
440 {
441 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
442 PFN_NUMBER Pfn = paPfns[0] + 1;
443 const size_t cPages = cb >> PAGE_SHIFT;
444 size_t iPage;
445 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
446 if (paPfns[iPage] != Pfn)
447 break;
448 if (iPage >= cPages)
449 {
450 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
451 if (pMemNt)
452 {
453 pMemNt->Core.u.Phys.fAllocated = true;
454 pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
455 pMemNt->fAllocatedPagesForMdl = true;
456 pMemNt->cMdls = 1;
457 pMemNt->apMdls[0] = pMdl;
458 *ppMem = &pMemNt->Core;
459 return VINF_SUCCESS;
460 }
461 }
462 }
463 g_pfnrtMmFreePagesFromMdl(pMdl);
464 ExFreePool(pMdl);
465 }
466 }
467
468 return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
469}
470
471
472DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
473{
474 if (g_pfnrtMmAllocatePagesForMdl && g_pfnrtMmFreePagesFromMdl)
475 {
476 PHYSICAL_ADDRESS Zero;
477 Zero.QuadPart = 0;
478 PHYSICAL_ADDRESS HighAddr;
479 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
480 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
481 if (pMdl)
482 {
483 if (MmGetMdlByteCount(pMdl) >= cb)
484 {
485 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
486 if (pMemNt)
487 {
488 pMemNt->fAllocatedPagesForMdl = true;
489 pMemNt->cMdls = 1;
490 pMemNt->apMdls[0] = pMdl;
491 *ppMem = &pMemNt->Core;
492 return VINF_SUCCESS;
493 }
494 }
495 g_pfnrtMmFreePagesFromMdl(pMdl);
496 ExFreePool(pMdl);
497 }
498 return VERR_NO_MEMORY;
499 }
500 return VERR_NOT_SUPPORTED;
501}
502
503
504DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
505{
506 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);
507
508 /*
509 * Validate the address range and create a descriptor for it.
510 */
511 PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
512 if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
513 return VERR_ADDRESS_TOO_BIG;
514
515 /*
516 * Create the IPRT memory object.
517 */
518 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
519 if (pMemNt)
520 {
521 pMemNt->Core.u.Phys.PhysBase = Phys;
522 pMemNt->Core.u.Phys.fAllocated = false;
523 pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
524 *ppMem = &pMemNt->Core;
525 return VINF_SUCCESS;
526 }
527 return VERR_NO_MEMORY;
528}
529
530
531/**
532 * Internal worker for locking down pages.
533 *
534 * @return IPRT status code.
535 *
536 * @param ppMem Where to store the memory object pointer.
537 * @param pv First page.
538 * @param cb Number of bytes.
539 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
540 * and RTMEM_PROT_WRITE.
541 * @param R0Process The process \a pv and \a cb refers to.
542 */
543static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
544{
545 /*
546 * Calc the number of MDLs we need and allocate the memory object structure.
547 */
548 size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
549 if (cb % MAX_LOCK_MEM_SIZE)
550 cMdls++;
551 if (cMdls >= UINT32_MAX)
552 return VERR_OUT_OF_RANGE;
553 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
554 RTR0MEMOBJTYPE_LOCK, pv, cb);
555 if (!pMemNt)
556 return VERR_NO_MEMORY;
557
558 /*
559 * Loop locking down the sub parts of the memory.
560 */
561 int rc = VINF_SUCCESS;
562 size_t cbTotal = 0;
563 uint8_t *pb = (uint8_t *)pv;
564 uint32_t iMdl;
565 for (iMdl = 0; iMdl < cMdls; iMdl++)
566 {
567 /*
568 * Calc the Mdl size and allocate it.
569 */
570 size_t cbCur = cb - cbTotal;
571 if (cbCur > MAX_LOCK_MEM_SIZE)
572 cbCur = MAX_LOCK_MEM_SIZE;
573 AssertMsg(cbCur, ("cbCur: 0!\n"));
574 PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
575 if (!pMdl)
576 {
577 rc = VERR_NO_MEMORY;
578 break;
579 }
580
581 /*
582 * Lock the pages.
583 */
584 __try
585 {
586 MmProbeAndLockPages(pMdl,
587 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
588 fAccess == RTMEM_PROT_READ
589 ? IoReadAccess
590 : fAccess == RTMEM_PROT_WRITE
591 ? IoWriteAccess
592 : IoModifyAccess);
593
594 pMemNt->apMdls[iMdl] = pMdl;
595 pMemNt->cMdls++;
596 }
597 __except(EXCEPTION_EXECUTE_HANDLER)
598 {
599 IoFreeMdl(pMdl);
600 rc = VERR_LOCK_FAILED;
601 break;
602 }
603
604 if ( R0Process != NIL_RTR0PROCESS
605 && g_pfnrtMmSecureVirtualMemory
606 && g_pfnrtMmUnsecureVirtualMemory)
607 {
608 /* Make sure the user process can't change the allocation. */
609 pMemNt->pvSecureMem = g_pfnrtMmSecureVirtualMemory(pv, cb,
610 fAccess & RTMEM_PROT_WRITE
611 ? PAGE_READWRITE
612 : PAGE_READONLY);
613 if (!pMemNt->pvSecureMem)
614 {
615 rc = VERR_NO_MEMORY;
616 break;
617 }
618 }
619
620 /* next */
621 cbTotal += cbCur;
622 pb += cbCur;
623 }
624 if (RT_SUCCESS(rc))
625 {
626 Assert(pMemNt->cMdls == cMdls);
627 pMemNt->Core.u.Lock.R0Process = R0Process;
628 *ppMem = &pMemNt->Core;
629 return rc;
630 }
631
632 /*
633 * We failed, perform cleanups.
634 */
635 while (iMdl-- > 0)
636 {
637 MmUnlockPages(pMemNt->apMdls[iMdl]);
638 IoFreeMdl(pMemNt->apMdls[iMdl]);
639 pMemNt->apMdls[iMdl] = NULL;
640 }
641 if (pMemNt->pvSecureMem)
642 {
643 if (g_pfnrtMmUnsecureVirtualMemory)
644 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
645 pMemNt->pvSecureMem = NULL;
646 }
647
648 rtR0MemObjDelete(&pMemNt->Core);
649 return rc;
650}
651
652
653DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
654 RTR0PROCESS R0Process)
655{
656 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
657 /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
658 return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
659}
660
661
662DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
663{
664 return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
665}
666
667
668DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
669{
670 /*
671 * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
672 */
673 RT_NOREF4(ppMem, pvFixed, cb, uAlignment);
674 return VERR_NOT_SUPPORTED;
675}
676
677
678DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
679 RTR0PROCESS R0Process)
680{
681 /*
682 * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
683 */
684 RT_NOREF5(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
685 return VERR_NOT_SUPPORTED;
686}
687
688
689/**
690 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
691 *
692 * @returns IPRT status code.
693 * @param ppMem Where to store the memory object for the mapping.
694 * @param pMemToMap The memory object to map.
695 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
696 * @param uAlignment The alignment requirement for the mapping.
697 * @param fProt The desired page protection for the mapping.
698 * @param R0Process If NIL_RTR0PROCESS, map into system (kernel) memory.
699 * If not nil, it must be the current process.
700 */
701static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
702 unsigned fProt, RTR0PROCESS R0Process)
703{
704 int rc = VERR_MAP_FAILED;
705
706 /*
707 * Check that the specified alignment is supported.
708 */
709 if (uAlignment > PAGE_SIZE)
710 return VERR_NOT_SUPPORTED;
711
712 /*
713 * There are two basic cases here, either we've got an MDL and can
714 * map it using MmMapLockedPages, or we've got a contiguous physical
715 * range (MMIO most likely) and can use MmMapIoSpace.
716 */
717 PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
718 if (pMemNtToMap->cMdls)
719 {
720 /* don't attempt to map locked regions with more than one MDL. */
721 if (pMemNtToMap->cMdls != 1)
722 return VERR_NOT_SUPPORTED;
723
724 /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
725 if (pvFixed != (void *)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
726 return VERR_NOT_SUPPORTED;
727
728 /* we can't map anything to the first page, sorry. */
729 if (pvFixed == 0)
730 return VERR_NOT_SUPPORTED;
731
732 /* only one system mapping for now - no time to figure out MDL restrictions right now. */
733 if ( pMemNtToMap->Core.uRel.Parent.cMappings
734 && R0Process == NIL_RTR0PROCESS)
735 return VERR_NOT_SUPPORTED;
736
737 __try
738 {
739 /** @todo uAlignment */
740 /** @todo How to set the protection on the pages? */
741 void *pv;
742 if (g_pfnrtMmMapLockedPagesSpecifyCache)
743 pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
744 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
745 MmCached,
746 pvFixed != (void *)-1 ? pvFixed : NULL,
747 FALSE /* no bug check on failure */,
748 NormalPagePriority);
749 else
750 pv = MmMapLockedPages(pMemNtToMap->apMdls[0],
751 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
752 if (pv)
753 {
754 NOREF(fProt);
755
756 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
757 pMemNtToMap->Core.cb);
758 if (pMemNt)
759 {
760 pMemNt->Core.u.Mapping.R0Process = R0Process;
761 *ppMem = &pMemNt->Core;
762 return VINF_SUCCESS;
763 }
764
765 rc = VERR_NO_MEMORY;
766 MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
767 }
768 }
769 __except(EXCEPTION_EXECUTE_HANDLER)
770 {
771#ifdef LOG_ENABLED
772 NTSTATUS rcNt = GetExceptionCode();
773 Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
774#endif
775
776 /* nothing */
777 rc = VERR_MAP_FAILED;
778 }
779
780 }
781 else
782 {
783 AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
784 && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
785
786 /* cannot map phys mem to user space (yet). */
787 if (R0Process != NIL_RTR0PROCESS)
788 return VERR_NOT_SUPPORTED;
789
790 /** @todo uAlignment */
791 /** @todo How to set the protection on the pages? */
792 PHYSICAL_ADDRESS Phys;
793 Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
794 void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
795 pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
796 if (pv)
797 {
798 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
799 pMemNtToMap->Core.cb);
800 if (pMemNt)
801 {
802 pMemNt->Core.u.Mapping.R0Process = R0Process;
803 *ppMem = &pMemNt->Core;
804 return VINF_SUCCESS;
805 }
806
807 rc = VERR_NO_MEMORY;
808 MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
809 }
810 }
811
812 NOREF(uAlignment); NOREF(fProt);
813 return rc;
814}
815
816
817DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
818 unsigned fProt, size_t offSub, size_t cbSub)
819{
820 AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
821 return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
822}
823
824
825DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
826{
827 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
828 return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
829}
830
831
832DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
833{
834#if 0
835 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
836#endif
837
838 /*
839 * There seem to be some issues with the MmProtectMdlSystemAddress API, so this
840 * code is not enabled until it has been tested with the driver verifier.
841 */
842#if 0
843 /*
844 * The API we've got requires a kernel mapping.
845 */
846 if ( pMemNt->cMdls
847 && g_pfnrtMmProtectMdlSystemAddress
848 && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
849 && pMemNt->Core.pv != NULL
850 && ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
851 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
852 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
853 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
854 && pMemNt->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
855 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
856 && pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS) ) )
857 {
858 /* Convert the protection. */
859 LOCK_OPERATION enmLockOp;
860 ULONG fAccess;
861 switch (fProt)
862 {
863 case RTMEM_PROT_NONE:
864 fAccess = PAGE_NOACCESS;
865 enmLockOp = IoReadAccess;
866 break;
867 case RTMEM_PROT_READ:
868 fAccess = PAGE_READONLY;
869 enmLockOp = IoReadAccess;
870 break;
871 case RTMEM_PROT_WRITE:
872 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
873 fAccess = PAGE_READWRITE;
874 enmLockOp = IoModifyAccess;
875 break;
876 case RTMEM_PROT_EXEC:
877 fAccess = PAGE_EXECUTE;
878 enmLockOp = IoReadAccess;
879 break;
880 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
881 fAccess = PAGE_EXECUTE_READ;
882 enmLockOp = IoReadAccess;
883 break;
884 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
885 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
886 fAccess = PAGE_EXECUTE_READWRITE;
887 enmLockOp = IoModifyAccess;
888 break;
889 default:
890 AssertFailedReturn(VERR_INVALID_FLAGS);
891 }
892
893 NTSTATUS rcNt = STATUS_SUCCESS;
894# if 0 /** @todo test this against the verifier. */
895 if (offSub == 0 && pMemNt->Core.cb == cbSub)
896 {
897 uint32_t iMdl = pMemNt->cMdls;
898 while (iMdl-- > 0)
899 {
900 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[iMdl], fAccess);
901 if (!NT_SUCCESS(rcNt))
902 break;
903 }
904 }
905 else
906# endif
907 {
908 /*
909 * We ASSUME the following here:
910 * - MmProtectMdlSystemAddress can deal with nonpaged pool memory
911 * - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
912 * - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
913 * exact same ranges prior to freeing them.
914 *
915 * So, we lock the pages temporarily, call the API and unlock them.
916 */
917 uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
918 while (cbSub > 0 && NT_SUCCESS(rcNt))
919 {
920 size_t cbCur = cbSub;
921 if (cbCur > MAX_LOCK_MEM_SIZE)
922 cbCur = MAX_LOCK_MEM_SIZE;
923 PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
924 if (pMdl)
925 {
926 __try
927 {
928 MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
929 }
930 __except(EXCEPTION_EXECUTE_HANDLER)
931 {
932 rcNt = GetExceptionCode();
933 }
934 if (NT_SUCCESS(rcNt))
935 {
936 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
937 MmUnlockPages(pMdl);
938 }
939 IoFreeMdl(pMdl);
940 }
941 else
942 rcNt = STATUS_NO_MEMORY;
943 pbCur += cbCur;
944 cbSub -= cbCur;
945 }
946 }
947
948 if (NT_SUCCESS(rcNt))
949 return VINF_SUCCESS;
950 return RTErrConvertFromNtStatus(rcNt);
951 }
952#else
953 RT_NOREF4(pMem, offSub, cbSub, fProt);
954#endif
955
956 return VERR_NOT_SUPPORTED;
957}
958
959
960DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
961{
962 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
963
964 if (pMemNt->cMdls)
965 {
966 if (pMemNt->cMdls == 1)
967 {
968 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
969 return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
970 }
971
972 size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
973 size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
974 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
975 return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
976 }
977
978 switch (pMemNt->Core.enmType)
979 {
980 case RTR0MEMOBJTYPE_MAPPING:
981 return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
982
983 case RTR0MEMOBJTYPE_PHYS:
984 return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
985
986 case RTR0MEMOBJTYPE_PAGE:
987 case RTR0MEMOBJTYPE_PHYS_NC:
988 case RTR0MEMOBJTYPE_LOW:
989 case RTR0MEMOBJTYPE_CONT:
990 case RTR0MEMOBJTYPE_LOCK:
991 default:
992 AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
993 case RTR0MEMOBJTYPE_RES_VIRT:
994 return NIL_RTHCPHYS;
995 }
996}
997
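For context, drivers do not call the rtR0MemObjNative* workers above directly; they are reached through the generic RTR0MemObj* API declared in iprt/memobj.h. A minimal caller sketch, assuming the usual public signatures (error handling trimmed, local names are illustrative only):

    #include <iprt/memobj.h>
    #include <iprt/param.h>
    #include <iprt/err.h>

    static int exampleAllocAndQuery(void)
    {
        RTR0MEMOBJ hMemObj;
        /* Lands in rtR0MemObjNativeAllocPage() on NT. */
        int rc = RTR0MemObjAllocPage(&hMemObj, 2 * PAGE_SIZE, false /*fExecutable*/);
        if (RT_SUCCESS(rc))
        {
            void    *pv    = RTR0MemObjAddress(hMemObj);             /* ring-0 mapping */
            RTHCPHYS Phys0 = RTR0MemObjGetPagePhysAddr(hMemObj, 0);  /* rtR0MemObjNativeGetPagePhysAddr() */
            NOREF(pv); NOREF(Phys0);
            rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);   /* rtR0MemObjNativeFree() */
        }
        return rc;
    }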