VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@ 78278

Last change on this file since 78278 was 78278, checked in by vboxsync, 6 years ago

IPRT/nt: Implemented offSub & cbSub in the NT versions of rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser. Untested+unused, but seems not to interfere with existing usage. bugref:9217

1/* $Id: memobj-r0drv-nt.cpp 78278 2019-04-24 16:03:59Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/memobj.h>
34#include <iprt/alloc.h>
35#include <iprt/assert.h>
36#include <iprt/err.h>
37#include <iprt/log.h>
38#include <iprt/param.h>
39#include <iprt/string.h>
40#include <iprt/process.h>
41#include "internal/memobj.h"
42#include "internal-r0drv-nt.h"
43
44
45/*********************************************************************************************************************************
46* Defined Constants And Macros *
47*********************************************************************************************************************************/
48/** Maximum number of bytes we try to lock down in one go.
49 * This is supposed to have a limit right below 256MB, but this appears
50 * to actually be much lower. The values here have been determined experimentally.
51 */
52#ifdef RT_ARCH_X86
53# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
54#endif
55#ifdef RT_ARCH_AMD64
56# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
57#endif
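/* For example: with the AMD64 limit above, rtR0MemObjNtLock() below splits a
   100 MB lock request into five MDLs (four full 24 MB chunks plus one 4 MB
   remainder chunk). */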
58
59
60/*********************************************************************************************************************************
61* Structures and Typedefs *
62*********************************************************************************************************************************/
63/**
64 * The NT version of the memory object structure.
65 */
66typedef struct RTR0MEMOBJNT
67{
68 /** The core structure. */
69 RTR0MEMOBJINTERNAL Core;
70 /** Set if the pages were allocated with MmAllocatePagesForMdl(). */
71 bool fAllocatedPagesForMdl;
73 /** Set if this is a sub-section of the parent. */
73 bool fSubMapping;
74 /** Pointer returned by MmSecureVirtualMemory */
75 PVOID pvSecureMem;
76 /** The number of PMDLs (memory descriptor lists) in the array. */
77 uint32_t cMdls;
78 /** Array of MDL pointers. (variable size) */
79 PMDL apMdls[1];
80} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
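/* Note: apMdls is a variable size array. rtR0MemObjNtLock() allocates the
   structure with RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]) so that there
   is one MDL pointer per MAX_LOCK_MEM_SIZE chunk of the locked range. */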
81
82
83
84DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
85{
86 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
87
88 /*
89 * Deal with it on a per type basis (just as a variation).
90 */
91 switch (pMemNt->Core.enmType)
92 {
93 case RTR0MEMOBJTYPE_LOW:
94 if (pMemNt->fAllocatedPagesForMdl)
95 {
96 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
97 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
98 pMemNt->Core.pv = NULL;
99 if (pMemNt->pvSecureMem)
100 {
101 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
102 pMemNt->pvSecureMem = NULL;
103 }
104
105 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
106 ExFreePool(pMemNt->apMdls[0]);
107 pMemNt->apMdls[0] = NULL;
108 pMemNt->cMdls = 0;
109 break;
110 }
111 AssertFailed();
112 break;
113
114 case RTR0MEMOBJTYPE_PAGE:
115 Assert(pMemNt->Core.pv);
116 if (g_pfnrtExFreePoolWithTag)
117 g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
118 else
119 ExFreePool(pMemNt->Core.pv);
120 pMemNt->Core.pv = NULL;
121
122 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
123 IoFreeMdl(pMemNt->apMdls[0]);
124 pMemNt->apMdls[0] = NULL;
125 pMemNt->cMdls = 0;
126 break;
127
128 case RTR0MEMOBJTYPE_CONT:
129 Assert(pMemNt->Core.pv);
130 MmFreeContiguousMemory(pMemNt->Core.pv);
131 pMemNt->Core.pv = NULL;
132
133 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
134 IoFreeMdl(pMemNt->apMdls[0]);
135 pMemNt->apMdls[0] = NULL;
136 pMemNt->cMdls = 0;
137 break;
138
139 case RTR0MEMOBJTYPE_PHYS:
140 /* rtR0MemObjNativeEnterPhys? */
141 if (!pMemNt->Core.u.Phys.fAllocated)
142 {
143 Assert(!pMemNt->fAllocatedPagesForMdl);
144 /* Nothing to do here. */
145 break;
146 }
147 RT_FALL_THRU();
148
149 case RTR0MEMOBJTYPE_PHYS_NC:
150 if (pMemNt->fAllocatedPagesForMdl)
151 {
152 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
153 ExFreePool(pMemNt->apMdls[0]);
154 pMemNt->apMdls[0] = NULL;
155 pMemNt->cMdls = 0;
156 break;
157 }
158 AssertFailed();
159 break;
160
161 case RTR0MEMOBJTYPE_LOCK:
162 if (pMemNt->pvSecureMem)
163 {
164 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
165 pMemNt->pvSecureMem = NULL;
166 }
167 for (uint32_t i = 0; i < pMemNt->cMdls; i++)
168 {
169 MmUnlockPages(pMemNt->apMdls[i]);
170 IoFreeMdl(pMemNt->apMdls[i]);
171 pMemNt->apMdls[i] = NULL;
172 }
173 break;
174
175 case RTR0MEMOBJTYPE_RES_VIRT:
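/* Note: reservations are currently not implemented in the NT backend
   (rtR0MemObjNativeReserveKernel/User below return VERR_NOT_SUPPORTED),
   so no RES_VIRT objects should ever reach this point. */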
176/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
177 {
178 }
179 else
180 {
181 }*/
182 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
183 return VERR_INTERNAL_ERROR;
184 break;
185
186 case RTR0MEMOBJTYPE_MAPPING:
187 {
188 PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
189 Assert(pMemNtParent);
190 Assert(pMemNt->Core.pv);
191 Assert((pMemNt->cMdls == 0 && !pMemNt->fSubMapping) || (pMemNt->cMdls == 1 && pMemNt->fSubMapping));
192 if (pMemNtParent->cMdls)
193 {
194 Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
195 Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
196 || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
197 if (!pMemNt->cMdls)
198 MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
199 else
200 {
201 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
202 IoFreeMdl(pMemNt->apMdls[0]);
203 pMemNt->apMdls[0] = NULL;
204 }
205 }
206 else
207 {
208 Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
209 && !pMemNtParent->Core.u.Phys.fAllocated);
210 Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
211 Assert(!pMemNt->fSubMapping);
212 MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
213 }
214 pMemNt->Core.pv = NULL;
215 break;
216 }
217
218 default:
219 AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
220 return VERR_INTERNAL_ERROR;
221 }
222
223 return VINF_SUCCESS;
224}
225
226
227DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
228{
229 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
230 RT_NOREF1(fExecutable);
231
232 /*
233 * Try to allocate the memory and create an MDL for it so
234 * we can query the physical addresses and do mappings later
235 * without running into out-of-memory conditions and similar problems.
236 */
237 int rc = VERR_NO_PAGE_MEMORY;
238 void *pv;
239 if (g_pfnrtExAllocatePoolWithTag)
240 pv = g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
241 else
242 pv = ExAllocatePool(NonPagedPool, cb);
243 if (pv)
244 {
245 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
246 if (pMdl)
247 {
248 MmBuildMdlForNonPagedPool(pMdl);
249#ifdef RT_ARCH_AMD64
250 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
251#endif
252
253 /*
254 * Create the IPRT memory object.
255 */
256 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
257 if (pMemNt)
258 {
259 pMemNt->cMdls = 1;
260 pMemNt->apMdls[0] = pMdl;
261 *ppMem = &pMemNt->Core;
262 return VINF_SUCCESS;
263 }
264
265 rc = VERR_NO_MEMORY;
266 IoFreeMdl(pMdl);
267 }
268 ExFreePool(pv);
269 }
270 return rc;
271}
272
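/*
 * Illustrative sketch only (kept disabled): a minimal ring-0 usage example of
 * the public IPRT API that ends up in rtR0MemObjNativeAllocPage() above. The
 * helper name rtSamplePageAllocUsage is made up for illustration and is not
 * part of IPRT.
 */
#if 0
static int rtSamplePageAllocUsage(void)
{
    RTR0MEMOBJ hMemObj;
    /* Allocates four pages of non-paged pool and builds an MDL for them. */
    int rc = RTR0MemObjAllocPage(&hMemObj, 4 * PAGE_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void     *pv    = RTR0MemObjAddress(hMemObj);            /* kernel VA of the allocation */
        RTHCPHYS  Phys0 = RTR0MemObjGetPagePhysAddr(hMemObj, 0); /* page 0, resolved via the MDL PFN array */
        Log(("sample: pv=%p Phys0=%RHp\n", pv, Phys0));
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}
#endif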
273
274DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
275{
276 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
277
278 /*
279 * Try to see if we get lucky first...
280 * (We could probably just assume we're lucky on NT4.)
281 */
282 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
283 if (RT_SUCCESS(rc))
284 {
285 size_t iPage = cb >> PAGE_SHIFT;
286 while (iPage-- > 0)
287 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
288 {
289 rc = VERR_NO_LOW_MEMORY;
290 break;
291 }
292 if (RT_SUCCESS(rc))
293 return rc;
294
295 /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
296 RTR0MemObjFree(*ppMem, false);
297 *ppMem = NULL;
298 }
299
300 /*
301 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
302 */
303 if ( g_pfnrtMmAllocatePagesForMdl
304 && g_pfnrtMmFreePagesFromMdl
305 && g_pfnrtMmMapLockedPagesSpecifyCache)
306 {
307 PHYSICAL_ADDRESS Zero;
308 Zero.QuadPart = 0;
309 PHYSICAL_ADDRESS HighAddr;
310 HighAddr.QuadPart = _4G - 1;
311 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
312 if (pMdl)
313 {
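/* MmAllocatePagesForMdl may legitimately return an MDL describing fewer bytes
   than requested, so verify we really got cb bytes before mapping it. */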
314 if (MmGetMdlByteCount(pMdl) >= cb)
315 {
316 __try
317 {
318 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
319 FALSE /* no bug check on failure */, NormalPagePriority);
320 if (pv)
321 {
322 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
323 if (pMemNt)
324 {
325 pMemNt->fAllocatedPagesForMdl = true;
326 pMemNt->cMdls = 1;
327 pMemNt->apMdls[0] = pMdl;
328 *ppMem = &pMemNt->Core;
329 return VINF_SUCCESS;
330 }
331 MmUnmapLockedPages(pv, pMdl);
332 }
333 }
334 __except(EXCEPTION_EXECUTE_HANDLER)
335 {
336# ifdef LOG_ENABLED
337 NTSTATUS rcNt = GetExceptionCode();
338 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
339# endif
340 /* nothing */
341 }
342 }
343 g_pfnrtMmFreePagesFromMdl(pMdl);
344 ExFreePool(pMdl);
345 }
346 }
347
348 /*
349 * Fall back on contiguous memory...
350 */
351 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
352}
353
354
355/**
356 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
357 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
358 * to what rtR0MemObjNativeAllocCont() does.
359 *
360 * @returns IPRT status code.
361 * @param ppMem Where to store the pointer to the ring-0 memory object.
362 * @param cb The size.
363 * @param fExecutable Whether the mapping should be executable or not.
364 * @param PhysHighest The highest physical address for the pages in the allocation.
365 * @param uAlignment The alignment of the physical memory to allocate.
366 * Supported values are PAGE_SIZE, _2M, _4M and _1G.
367 */
368static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
369 size_t uAlignment)
370{
371 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
372 RT_NOREF1(fExecutable);
373
374 /*
375 * Allocate the memory and create an MDL for it.
376 */
377 PHYSICAL_ADDRESS PhysAddrHighest;
378 PhysAddrHighest.QuadPart = PhysHighest;
379 void *pv;
380 if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
381 {
382 PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
383 PhysAddrLowest.QuadPart = 0;
384 PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
385 pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
386 }
387 else if (uAlignment == PAGE_SIZE)
388 pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
389 else
390 return VERR_NOT_SUPPORTED;
391 if (!pv)
392 return VERR_NO_MEMORY;
393
394 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
395 if (pMdl)
396 {
397 MmBuildMdlForNonPagedPool(pMdl);
398#ifdef RT_ARCH_AMD64
399 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
400#endif
401
402 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
403 if (pMemNt)
404 {
405 pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
406 pMemNt->cMdls = 1;
407 pMemNt->apMdls[0] = pMdl;
408 *ppMem = &pMemNt->Core;
409 return VINF_SUCCESS;
410 }
411
412 IoFreeMdl(pMdl);
413 }
414 MmFreeContiguousMemory(pv);
415 return VERR_NO_MEMORY;
416}
417
418
419DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
420{
421 return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
422}
423
424
425DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
426{
427 /*
428 * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
429 *
430 * This is preferable to using MmAllocateContiguousMemory because there are
431 * a few situations where the memory shouldn't be mapped, like for instance
432 * VT-x control memory. Since these are rather small allocations (one or
433 * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
434 * request.
435 *
436 * If the allocation is big, the chances are *probably* not very good. The
437 * current limit is kind of random...
438 */
439 if ( cb < _128K
440 && uAlignment == PAGE_SIZE
441 && g_pfnrtMmAllocatePagesForMdl
442 && g_pfnrtMmFreePagesFromMdl)
443 {
444 PHYSICAL_ADDRESS Zero;
445 Zero.QuadPart = 0;
446 PHYSICAL_ADDRESS HighAddr;
447 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
448 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
449 if (pMdl)
450 {
451 if (MmGetMdlByteCount(pMdl) >= cb)
452 {
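/* Check that the pages MmAllocatePagesForMdl handed back are physically
   contiguous, i.e. that the PFN array is one run of consecutive frame numbers. */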
453 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
454 PFN_NUMBER Pfn = paPfns[0] + 1;
455 const size_t cPages = cb >> PAGE_SHIFT;
456 size_t iPage;
457 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
458 if (paPfns[iPage] != Pfn)
459 break;
460 if (iPage >= cPages)
461 {
462 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
463 if (pMemNt)
464 {
465 pMemNt->Core.u.Phys.fAllocated = true;
466 pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
467 pMemNt->fAllocatedPagesForMdl = true;
468 pMemNt->cMdls = 1;
469 pMemNt->apMdls[0] = pMdl;
470 *ppMem = &pMemNt->Core;
471 return VINF_SUCCESS;
472 }
473 }
474 }
475 g_pfnrtMmFreePagesFromMdl(pMdl);
476 ExFreePool(pMdl);
477 }
478 }
479
480 return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
481}
482
483
484DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
485{
486 if (g_pfnrtMmAllocatePagesForMdl && g_pfnrtMmFreePagesFromMdl)
487 {
488 PHYSICAL_ADDRESS Zero;
489 Zero.QuadPart = 0;
490 PHYSICAL_ADDRESS HighAddr;
491 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
492 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
493 if (pMdl)
494 {
495 if (MmGetMdlByteCount(pMdl) >= cb)
496 {
497 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
498 if (pMemNt)
499 {
500 pMemNt->fAllocatedPagesForMdl = true;
501 pMemNt->cMdls = 1;
502 pMemNt->apMdls[0] = pMdl;
503 *ppMem = &pMemNt->Core;
504 return VINF_SUCCESS;
505 }
506 }
507 g_pfnrtMmFreePagesFromMdl(pMdl);
508 ExFreePool(pMdl);
509 }
510 return VERR_NO_MEMORY;
511 }
512 return VERR_NOT_SUPPORTED;
513}
514
515
516DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
517{
518 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);
519
520 /*
521 * Validate the address range and create a descriptor for it.
522 */
523 PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
524 if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
525 return VERR_ADDRESS_TOO_BIG;
526
527 /*
528 * Create the IPRT memory object.
529 */
530 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
531 if (pMemNt)
532 {
533 pMemNt->Core.u.Phys.PhysBase = Phys;
534 pMemNt->Core.u.Phys.fAllocated = false;
535 pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
536 *ppMem = &pMemNt->Core;
537 return VINF_SUCCESS;
538 }
539 return VERR_NO_MEMORY;
540}
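/* Note: objects entered this way have u.Phys.fAllocated = false and no MDL,
   which is what makes rtR0MemObjNtMap() below use MmMapIoSpace for them
   instead of an MDL based mapping. */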
541
542
543/**
544 * Internal worker for locking down pages.
545 *
546 * @return IPRT status code.
547 *
548 * @param ppMem Where to store the memory object pointer.
549 * @param pv First page.
550 * @param cb Number of bytes.
551 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
552 * and RTMEM_PROT_WRITE.
553 * @param R0Process The process that \a pv and \a cb refer to.
554 */
555static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
556{
557 /*
558 * Calc the number of MDLs we need and allocate the memory object structure.
559 */
560 size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
561 if (cb % MAX_LOCK_MEM_SIZE)
562 cMdls++;
563 if (cMdls >= UINT32_MAX)
564 return VERR_OUT_OF_RANGE;
565 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
566 RTR0MEMOBJTYPE_LOCK, pv, cb);
567 if (!pMemNt)
568 return VERR_NO_MEMORY;
569
570 /*
571 * Loop locking down the sub parts of the memory.
572 */
573 int rc = VINF_SUCCESS;
574 size_t cbTotal = 0;
575 uint8_t *pb = (uint8_t *)pv;
576 uint32_t iMdl;
577 for (iMdl = 0; iMdl < cMdls; iMdl++)
578 {
579 /*
580 * Calc the Mdl size and allocate it.
581 */
582 size_t cbCur = cb - cbTotal;
583 if (cbCur > MAX_LOCK_MEM_SIZE)
584 cbCur = MAX_LOCK_MEM_SIZE;
585 AssertMsg(cbCur, ("cbCur: 0!\n"));
586 PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
587 if (!pMdl)
588 {
589 rc = VERR_NO_MEMORY;
590 break;
591 }
592
593 /*
594 * Lock the pages.
595 */
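/* Note: MmProbeAndLockPages raises a structured exception instead of returning
   an error status on failure, hence the __try/__except wrapper. */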
596 __try
597 {
598 MmProbeAndLockPages(pMdl,
599 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
600 fAccess == RTMEM_PROT_READ
601 ? IoReadAccess
602 : fAccess == RTMEM_PROT_WRITE
603 ? IoWriteAccess
604 : IoModifyAccess);
605
606 pMemNt->apMdls[iMdl] = pMdl;
607 pMemNt->cMdls++;
608 }
609 __except(EXCEPTION_EXECUTE_HANDLER)
610 {
611 IoFreeMdl(pMdl);
612 rc = VERR_LOCK_FAILED;
613 break;
614 }
615
616 if ( R0Process != NIL_RTR0PROCESS
617 && g_pfnrtMmSecureVirtualMemory
618 && g_pfnrtMmUnsecureVirtualMemory)
619 {
620 /* Make sure the user process can't change the allocation. */
621 pMemNt->pvSecureMem = g_pfnrtMmSecureVirtualMemory(pv, cb,
622 fAccess & RTMEM_PROT_WRITE
623 ? PAGE_READWRITE
624 : PAGE_READONLY);
625 if (!pMemNt->pvSecureMem)
626 {
627 rc = VERR_NO_MEMORY;
628 break;
629 }
630 }
631
632 /* next */
633 cbTotal += cbCur;
634 pb += cbCur;
635 }
636 if (RT_SUCCESS(rc))
637 {
638 Assert(pMemNt->cMdls == cMdls);
639 pMemNt->Core.u.Lock.R0Process = R0Process;
640 *ppMem = &pMemNt->Core;
641 return rc;
642 }
643
644 /*
645 * We failed, perform cleanups.
646 */
647 while (iMdl-- > 0)
648 {
649 MmUnlockPages(pMemNt->apMdls[iMdl]);
650 IoFreeMdl(pMemNt->apMdls[iMdl]);
651 pMemNt->apMdls[iMdl] = NULL;
652 }
653 if (pMemNt->pvSecureMem)
654 {
655 if (g_pfnrtMmUnsecureVirtualMemory)
656 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
657 pMemNt->pvSecureMem = NULL;
658 }
659
660 rtR0MemObjDelete(&pMemNt->Core);
661 return rc;
662}
663
664
665DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
666 RTR0PROCESS R0Process)
667{
668 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
669 /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
670 return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
671}
672
673
674DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
675{
676 return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
677}
678
679
680DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
681{
682 /*
683 * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
684 */
685 RT_NOREF4(ppMem, pvFixed, cb, uAlignment);
686 return VERR_NOT_SUPPORTED;
687}
688
689
690DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
691 RTR0PROCESS R0Process)
692{
693 /*
694 * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
695 */
696 RT_NOREF5(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
697 return VERR_NOT_SUPPORTED;
698}
699
700
701/**
702 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
703 *
704 * @returns IPRT status code.
705 * @param ppMem Where to store the memory object for the mapping.
706 * @param pMemToMap The memory object to map.
707 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
708 * @param uAlignment The alignment requirement for the mapping.
709 * @param fProt The desired page protection for the mapping.
710 * @param R0Process If NIL_RTR0PROCESS, map into system (kernel) memory.
711 * If not nil, it's the current process.
 * @param offSub Offset into @a pMemToMap at which the mapping starts; zero together with a zero @a cbSub maps the whole object.
 * @param cbSub Number of bytes to map; zero together with a zero @a offSub maps the whole object.
712 */
713static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
714 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
715{
716 int rc = VERR_MAP_FAILED;
717
718 /*
719 * Check that the specified alignment is supported.
720 */
721 if (uAlignment > PAGE_SIZE)
722 return VERR_NOT_SUPPORTED;
723
724 /*
725 * There are two basic cases here, either we've got an MDL and can
726 * map it using MmMapLockedPages, or we've got a contiguous physical
727 * range (MMIO most likely) and can use MmMapIoSpace.
728 */
729 PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
730 if (pMemNtToMap->cMdls)
731 {
732 /* Don't attempt to map locked regions with more than one MDL. */
733 if (pMemNtToMap->cMdls != 1)
734 return VERR_NOT_SUPPORTED;
735
736 /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
737 if (pvFixed != (void *)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
738 return VERR_NOT_SUPPORTED;
739
740 /* we can't map anything to the first page, sorry. */
741 if (pvFixed == 0)
742 return VERR_NOT_SUPPORTED;
743
744 /* only one system mapping for now - no time to figure out MDL restrictions right now. */
745 if ( pMemNtToMap->Core.uRel.Parent.cMappings
746 && R0Process == NIL_RTR0PROCESS)
747 {
748 if (pMemNtToMap->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
749 return VERR_NOT_SUPPORTED;
750 uint32_t iMapping = pMemNtToMap->Core.uRel.Parent.cMappings;
751 while (iMapping-- > 0)
752 {
753 PRTR0MEMOBJNT pMapping = (PRTR0MEMOBJNT)pMemNtToMap->Core.uRel.Parent.papMappings[iMapping];
754 if ( pMapping->Core.enmType != RTR0MEMOBJTYPE_MAPPING
755 || pMapping->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
756 return VERR_NOT_SUPPORTED;
757 }
758 }
759
760 /* Create a partial MDL if this is a sub-range request. */
761 PMDL pMdl;
762 if (!offSub && !cbSub)
763 pMdl = pMemNtToMap->apMdls[0];
764 else
765 {
766 pMdl = IoAllocateMdl(NULL, (ULONG)cbSub, FALSE, FALSE, NULL);
767 if (pMdl)
768 IoBuildPartialMdl(pMemNtToMap->apMdls[0], pMdl,
769 (uint8_t *)MmGetMdlVirtualAddress(pMemNtToMap->apMdls[0]) + offSub, (ULONG)cbSub);
770 else
771 return VERR_NO_MEMORY; /* pMdl is NULL here, so there is nothing to free. */
775 }
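/* On success a partial MDL created above is owned by the new mapping object and
   is freed again in the RTR0MEMOBJTYPE_MAPPING case of rtR0MemObjNativeFree(). */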
776
777 __try
778 {
779 /** @todo uAlignment */
780 /** @todo How to set the protection on the pages? */
781 void *pv;
782 if (g_pfnrtMmMapLockedPagesSpecifyCache)
783 pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl,
784 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
785 MmCached,
786 pvFixed != (void *)-1 ? pvFixed : NULL,
787 FALSE /* no bug check on failure */,
788 NormalPagePriority);
789 else
790 pv = MmMapLockedPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
791 if (pv)
792 {
793 NOREF(fProt);
794
795 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew( !offSub && !cbSub
796 ? sizeof(*pMemNt) : RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[1]),
797 RTR0MEMOBJTYPE_MAPPING, pv, pMemNtToMap->Core.cb);
798 if (pMemNt)
799 {
800 pMemNt->Core.u.Mapping.R0Process = R0Process;
801 if (!offSub && !cbSub)
802 pMemNt->fSubMapping = false;
803 else
804 {
805 pMemNt->apMdls[0] = pMdl;
806 pMemNt->cMdls = 1;
807 pMemNt->fSubMapping = true;
808 }
809
810 *ppMem = &pMemNt->Core;
811 return VINF_SUCCESS;
812 }
813
814 rc = VERR_NO_MEMORY;
815 MmUnmapLockedPages(pv, pMdl);
816 }
817 }
818 __except(EXCEPTION_EXECUTE_HANDLER)
819 {
820#ifdef LOG_ENABLED
821 NTSTATUS rcNt = GetExceptionCode();
822 Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
823#endif
824
825 /* nothing */
826 rc = VERR_MAP_FAILED;
827 }
828
829 }
830 else
831 {
832 AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
833 && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
834
835 /* cannot map phys mem to user space (yet). */
836 if (R0Process != NIL_RTR0PROCESS)
837 return VERR_NOT_SUPPORTED;
838
839 /* Cannot sub-map these (yet). */
840 AssertMsgReturn(!offSub && !cbSub, ("%#zx %#zx\n", offSub, cbSub), VERR_NOT_SUPPORTED);
841
842
843 /** @todo uAlignment */
844 /** @todo How to set the protection on the pages? */
845 PHYSICAL_ADDRESS Phys;
846 Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
847 void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
848 pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
849 if (pv)
850 {
851 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
852 pMemNtToMap->Core.cb);
853 if (pMemNt)
854 {
855 pMemNt->Core.u.Mapping.R0Process = R0Process;
856 *ppMem = &pMemNt->Core;
857 return VINF_SUCCESS;
858 }
859
860 rc = VERR_NO_MEMORY;
861 MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
862 }
863 }
864
865 NOREF(uAlignment); NOREF(fProt);
866 return rc;
867}
868
869
870DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
871 unsigned fProt, size_t offSub, size_t cbSub)
872{
873 return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS, offSub, cbSub);
874}
875
876
877DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
878 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
879{
880 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
881 return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub);
882}
883
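/*
 * Illustrative sketch only (kept disabled): how the offSub/cbSub support above
 * could be reached through the public RTR0MemObjMapKernelEx() API, mapping just
 * the second page of a larger object. The helper name rtSampleSubMapping is
 * made up for illustration and is not part of IPRT.
 */
#if 0
static int rtSampleSubMapping(RTR0MEMOBJ hMemObjToMap)
{
    RTR0MEMOBJ hMapping;
    /* Non-zero offSub/cbSub takes the partial MDL path in rtR0MemObjNtMap(). */
    int rc = RTR0MemObjMapKernelEx(&hMapping, hMemObjToMap, (void *)-1 /*pvFixed*/,
                                   PAGE_SIZE /*uAlignment*/, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                                   PAGE_SIZE /*offSub*/, PAGE_SIZE /*cbSub*/);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMapping);
        Log(("sample: sub-mapping of one page at %p\n", pv));
        rc = RTR0MemObjFree(hMapping, false /*fFreeMappings*/);
    }
    return rc;
}
#endif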
884
885DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
886{
887#if 0
888 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
889#endif
890
891 /*
892 * Seems there are some issues with this MmProtectMdlSystemAddress API, so
893 * this code isn't currently enabled until we've tested it with the verifier.
894 */
895#if 0
896 /*
897 * The API we've got requires a kernel mapping.
898 */
899 if ( pMemNt->cMdls
900 && g_pfnrtMmProtectMdlSystemAddress
901 && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
902 && pMemNt->Core.pv != NULL
903 && ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
904 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
905 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
906 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
907 && pMemNt->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
908 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
909 && pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS) ) )
910 {
911 /* Convert the protection. */
912 LOCK_OPERATION enmLockOp;
913 ULONG fAccess;
914 switch (fProt)
915 {
916 case RTMEM_PROT_NONE:
917 fAccess = PAGE_NOACCESS;
918 enmLockOp = IoReadAccess;
919 break;
920 case RTMEM_PROT_READ:
921 fAccess = PAGE_READONLY;
922 enmLockOp = IoReadAccess;
923 break;
924 case RTMEM_PROT_WRITE:
925 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
926 fAccess = PAGE_READWRITE;
927 enmLockOp = IoModifyAccess;
928 break;
929 case RTMEM_PROT_EXEC:
930 fAccess = PAGE_EXECUTE;
931 enmLockOp = IoReadAccess;
932 break;
933 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
934 fAccess = PAGE_EXECUTE_READ;
935 enmLockOp = IoReadAccess;
936 break;
937 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
938 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
939 fAccess = PAGE_EXECUTE_READWRITE;
940 enmLockOp = IoModifyAccess;
941 break;
942 default:
943 AssertFailedReturn(VERR_INVALID_FLAGS);
944 }
945
946 NTSTATUS rcNt = STATUS_SUCCESS;
947# if 0 /** @todo test this against the verifier. */
948 if (offSub == 0 && pMemNt->Core.cb == cbSub)
949 {
950 uint32_t iMdl = pMemNt->cMdls;
951 while (iMdl-- > 0)
952 {
953 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[iMdl], fAccess);
954 if (!NT_SUCCESS(rcNt))
955 break;
956 }
957 }
958 else
959# endif
960 {
961 /*
962 * We ASSUME the following here:
963 * - MmProtectMdlSystemAddress can deal with nonpaged pool memory
964 * - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
965 * - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
966 * exact same ranges prior to freeing them.
967 *
968 * So, we lock the pages temporarily, call the API and unlock them.
969 */
970 uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
971 while (cbSub > 0 && NT_SUCCESS(rcNt))
972 {
973 size_t cbCur = cbSub;
974 if (cbCur > MAX_LOCK_MEM_SIZE)
975 cbCur = MAX_LOCK_MEM_SIZE;
976 PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
977 if (pMdl)
978 {
979 __try
980 {
981 MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
982 }
983 __except(EXCEPTION_EXECUTE_HANDLER)
984 {
985 rcNt = GetExceptionCode();
986 }
987 if (NT_SUCCESS(rcNt))
988 {
989 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
990 MmUnlockPages(pMdl);
991 }
992 IoFreeMdl(pMdl);
993 }
994 else
995 rcNt = STATUS_NO_MEMORY;
996 pbCur += cbCur;
997 cbSub -= cbCur;
998 }
999 }
1000
1001 if (NT_SUCCESS(rcNt))
1002 return VINF_SUCCESS;
1003 return RTErrConvertFromNtStatus(rcNt);
1004 }
1005#else
1006 RT_NOREF4(pMem, offSub, cbSub, fProt);
1007#endif
1008
1009 return VERR_NOT_SUPPORTED;
1010}
1011
1012
1013DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1014{
1015 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
1016
1017 if (pMemNt->cMdls)
1018 {
1019 if (pMemNt->cMdls == 1)
1020 {
1021 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
1022 return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
1023 }
1024
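/* Multi-MDL case, used for big locked-down ranges: e.g. with the AMD64
   MAX_LOCK_MEM_SIZE of 24 MB (6144 pages per MDL), iPage 10000 resolves to
   MDL 1, PFN index 3856. */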
1025 size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
1026 size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
1027 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
1028 return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
1029 }
1030
1031 switch (pMemNt->Core.enmType)
1032 {
1033 case RTR0MEMOBJTYPE_MAPPING:
1034 return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
1035
1036 case RTR0MEMOBJTYPE_PHYS:
1037 return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
1038
1039 case RTR0MEMOBJTYPE_PAGE:
1040 case RTR0MEMOBJTYPE_PHYS_NC:
1041 case RTR0MEMOBJTYPE_LOW:
1042 case RTR0MEMOBJTYPE_CONT:
1043 case RTR0MEMOBJTYPE_LOCK:
1044 default:
1045 AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
1046 case RTR0MEMOBJTYPE_RES_VIRT:
1047 return NIL_RTHCPHYS;
1048 }
1049}
1050