VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@104848

Last change on this file since 104848 was 104848, checked in by vboxsync, 8 months ago

VMM/PGM,SUPDrv,IPRT: Added a RTR0MemObjZeroInitialize function to IPRT/SUPDrv for helping zero initializing MMIO2 backing memory. bugref:10687

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 47.7 KB
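For orientation, a minimal sketch of how a ring-0 caller typically consumes the memory object API that this backend implements is shown below. This is illustrative only, not code from this file; in particular, the exact prototype of the RTR0MemObjZeroInitialize function referenced by the change above should be checked against iprt/memobj.h (it is assumed here to take only the memory object handle), and exampleAllocAndZero is a hypothetical helper name.

#include <iprt/memobj.h>
#include <iprt/err.h>

/* Hypothetical ring-0 helper: allocate page-backed memory and zero-initialize it. */
static int exampleAllocAndZero(size_t cb)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, cb, false /*fExecutable*/); /* backed by rtR0MemObjNativeAllocPage() on NT */
    if (RT_SUCCESS(rc))
    {
        rc = RTR0MemObjZeroInitialize(hMemObj); /* assumed prototype; see iprt/memobj.h */
        RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}
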
1/* $Id: memobj-r0drv-nt.cpp 104848 2024-06-05 09:38:20Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include "the-nt-kernel.h"
42
43#include <iprt/memobj.h>
44#include <iprt/alloc.h>
45#include <iprt/assert.h>
46#include <iprt/err.h>
47#include <iprt/log.h>
48#include <iprt/param.h>
49#include <iprt/string.h>
50#include <iprt/process.h>
51#include "internal/memobj.h"
52#include "internal-r0drv-nt.h"
53
54
55/*********************************************************************************************************************************
56* Defined Constants And Macros *
57*********************************************************************************************************************************/
58/** Maximum number of bytes we try to lock down in one go.
59 * This is supposed to have a limit right below 256MB, but this appears
60 * to actually be much lower. The values here have been determined experimentally.
61 */
62#ifdef RT_ARCH_X86
63# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
64#endif
65#ifdef RT_ARCH_AMD64
66# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
67#endif
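
/*
 * Illustrative arithmetic (not part of the original file): rtR0MemObjNtLock()
 * below splits a lock request into MAX_LOCK_MEM_SIZE chunks with one MDL per
 * chunk.  Locking 100 MiB on AMD64 (24 MiB chunks) thus takes 100/24 = 4 full
 * chunks plus a 4 MiB remainder, i.e. 5 MDLs.
 */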
68
69/* Newer WDK constants: */
70#ifndef MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS
71# define MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS 0x20
72#endif
73#ifndef MM_ALLOCATE_FAST_LARGE_PAGES
74# define MM_ALLOCATE_FAST_LARGE_PAGES 0x40
75#endif
76
77
78/*********************************************************************************************************************************
79* Structures and Typedefs *
80*********************************************************************************************************************************/
81/**
82 * The NT version of the memory object structure.
83 */
84typedef struct RTR0MEMOBJNT
85{
86 /** The core structure. */
87 RTR0MEMOBJINTERNAL Core;
88 /** Whether MmAllocatePagesForMdl() was used for the allocation. */
89 bool fAllocatedPagesForMdl;
90 /** Set if this is a sub-section of the parent. */
91 bool fSubMapping;
92 /** Pointer returned by MmSecureVirtualMemory */
93 PVOID pvSecureMem;
94 /** The number of PMDLs (memory descriptor lists) in the array. */
95 uint32_t cMdls;
96 /** Array of MDL pointers. (variable size) */
97 PMDL apMdls[1];
98} RTR0MEMOBJNT;
99/** Pointer to the NT version of the memory object structure. */
100typedef RTR0MEMOBJNT *PRTR0MEMOBJNT;
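
/*
 * Illustrative note (not part of the original file): apMdls[1] is a variable-size
 * trailing array.  When an object needs cMdls descriptors (see rtR0MemObjNtLock()
 * below), the structure is over-allocated accordingly, e.g.:
 *
 *     size_t const cbObj = RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]);
 *     PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(cbObj, RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
 *
 * This mirrors the RT_UOFFSETOF_DYN() usage later in this file.
 */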
101
102
103
104DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
105{
106 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
107
108 /*
109 * Deal with it on a per type basis (just as a variation).
110 */
111 switch (pMemNt->Core.enmType)
112 {
113 case RTR0MEMOBJTYPE_LOW:
114 if (pMemNt->fAllocatedPagesForMdl)
115 {
116 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
117 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
118 pMemNt->Core.pv = NULL;
119 if (pMemNt->pvSecureMem)
120 {
121 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
122 pMemNt->pvSecureMem = NULL;
123 }
124
125 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
126 ExFreePool(pMemNt->apMdls[0]);
127 pMemNt->apMdls[0] = NULL;
128 pMemNt->cMdls = 0;
129 break;
130 }
131 AssertFailed();
132 break;
133
134 case RTR0MEMOBJTYPE_PAGE:
135 Assert(pMemNt->Core.pv);
136 if (pMemNt->fAllocatedPagesForMdl)
137 {
138 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
139 Assert(pMemNt->pvSecureMem == NULL);
140 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
141 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
142 ExFreePool(pMemNt->apMdls[0]);
143 }
144 else
145 {
146 if (g_pfnrtExFreePoolWithTag)
147 g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
148 else
149 ExFreePool(pMemNt->Core.pv);
150
151 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
152 IoFreeMdl(pMemNt->apMdls[0]);
153 }
154 pMemNt->Core.pv = NULL;
155 pMemNt->apMdls[0] = NULL;
156 pMemNt->cMdls = 0;
157 break;
158
159 case RTR0MEMOBJTYPE_CONT:
160 Assert(pMemNt->Core.pv);
161 MmFreeContiguousMemory(pMemNt->Core.pv);
162 pMemNt->Core.pv = NULL;
163
164 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
165 IoFreeMdl(pMemNt->apMdls[0]);
166 pMemNt->apMdls[0] = NULL;
167 pMemNt->cMdls = 0;
168 break;
169
170 case RTR0MEMOBJTYPE_PHYS:
171 /* rtR0MemObjNativeEnterPhys? */
172 if (!pMemNt->Core.u.Phys.fAllocated)
173 {
174 Assert(!pMemNt->fAllocatedPagesForMdl);
175 /* Nothing to do here. */
176 break;
177 }
178 RT_FALL_THRU();
179
180 case RTR0MEMOBJTYPE_PHYS_NC:
181 if (pMemNt->fAllocatedPagesForMdl)
182 {
183 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
184 ExFreePool(pMemNt->apMdls[0]);
185 pMemNt->apMdls[0] = NULL;
186 pMemNt->cMdls = 0;
187 break;
188 }
189 AssertFailed();
190 break;
191
192 case RTR0MEMOBJTYPE_LOCK:
193 if (pMemNt->pvSecureMem)
194 {
195 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
196 pMemNt->pvSecureMem = NULL;
197 }
198 for (uint32_t i = 0; i < pMemNt->cMdls; i++)
199 {
200 MmUnlockPages(pMemNt->apMdls[i]);
201 IoFreeMdl(pMemNt->apMdls[i]);
202 pMemNt->apMdls[i] = NULL;
203 }
204 break;
205
206 case RTR0MEMOBJTYPE_RES_VIRT:
207/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
208 {
209 }
210 else
211 {
212 }*/
213 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
214 return VERR_INTERNAL_ERROR;
215 break;
216
217 case RTR0MEMOBJTYPE_MAPPING:
218 {
219 PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
220 Assert(pMemNtParent);
221 Assert(pMemNt->Core.pv);
222 Assert((pMemNt->cMdls == 0 && !pMemNt->fSubMapping) || (pMemNt->cMdls == 1 && pMemNt->fSubMapping));
223 if (pMemNtParent->cMdls)
224 {
225 Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
226 Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
227 || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
228 if (!pMemNt->cMdls)
229 MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
230 else
231 {
232 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
233 IoFreeMdl(pMemNt->apMdls[0]);
234 pMemNt->apMdls[0] = NULL;
235 }
236 }
237 else
238 {
239 Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
240 && !pMemNtParent->Core.u.Phys.fAllocated);
241 Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
242 Assert(!pMemNt->fSubMapping);
243 MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
244 }
245 pMemNt->Core.pv = NULL;
246 break;
247 }
248
249 default:
250 AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
251 return VERR_INTERNAL_ERROR;
252 }
253
254 return VINF_SUCCESS;
255}
256
257
258DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
259{
260 RT_NOREF1(fExecutable);
261
262 /*
263 * Use MmAllocatePagesForMdl if the allocation is a little bit big.
264 */
265 int rc = VERR_NO_PAGE_MEMORY;
266 if ( cb > _1M
267 && g_pfnrtMmAllocatePagesForMdl
268 && g_pfnrtMmFreePagesFromMdl
269 && g_pfnrtMmMapLockedPagesSpecifyCache)
270 {
271 PHYSICAL_ADDRESS Zero;
272 Zero.QuadPart = 0;
273 PHYSICAL_ADDRESS HighAddr;
274 HighAddr.QuadPart = MAXLONGLONG;
275 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
276 if (pMdl)
277 {
278 if (MmGetMdlByteCount(pMdl) >= cb)
279 {
280 __try
281 {
282 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
283 FALSE /* no bug check on failure */, NormalPagePriority);
284 if (pv)
285 {
286#ifdef RT_ARCH_AMD64
287 if (fExecutable)
288 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
289#endif
290
291 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
292 if (pMemNt)
293 {
294 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
295 pMemNt->fAllocatedPagesForMdl = true;
296 pMemNt->cMdls = 1;
297 pMemNt->apMdls[0] = pMdl;
298 *ppMem = &pMemNt->Core;
299 return VINF_SUCCESS;
300 }
301 MmUnmapLockedPages(pv, pMdl);
302 }
303 }
304 __except(EXCEPTION_EXECUTE_HANDLER)
305 {
306# ifdef LOG_ENABLED
307 NTSTATUS rcNt = GetExceptionCode();
308 Log(("rtR0MemObjNativeAllocPage: Exception Code %#x\n", rcNt));
309# endif
310 /* nothing */
311 }
312 }
313 g_pfnrtMmFreePagesFromMdl(pMdl);
314 ExFreePool(pMdl);
315 }
316 }
317
318 /*
319 * There are several limiting factors here. First there is the ULONG length
320 * parameter in the IoAllocateMdl call, which means the absolute maximum is
321 * 4GiB - PAGE_SIZE. That API is documented to limit the max length
322 * differently for the Windows versions:
323 * - Pre-Vista: (65535 - sizeof(MDL)/sizeof(ULONG_PTR)) * PAGE_SIZE
324 * - Vista & Server 2008: 2GiB - PAGE_SIZE.
325 * - Windows 7 & Server 2008 R2 and higher: 4GiB - PAGE_SIZE.
326 *
327 * Before that we've got the limitations of the pool code and available
328 * kernel mapping space (32-bit).
329 */
330#if ARCH_BITS == 32
331 AssertMsgReturn(cb < _512M, ("%#x\n", cb), VERR_OUT_OF_RANGE);
332#else
333 AssertMsgReturn(cb < _4G, ("%#x\n", cb), VERR_OUT_OF_RANGE);
334#endif
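
/*
 * Illustrative arithmetic (not part of the original file): on 32-bit Windows
 * sizeof(MDL) is 28 bytes, so the pre-Vista formula above works out to
 * (65535 - 28/4) * 4096 = 65528 * 4096 bytes, i.e. just short of 256 MiB, which
 * matches the "right below 256MB" remark in the MAX_LOCK_MEM_SIZE comment above.
 */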
335
336 /*
337 * Try to allocate the memory and create an MDL for it so
338 * we can query the physical addresses and do mappings later
339 * without running into out-of-memory conditions and similar problems.
340 */
341 void *pv;
342 if (g_pfnrtExAllocatePoolWithTag)
343 pv = g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
344 else
345 pv = ExAllocatePool(NonPagedPool, cb);
346 if (pv)
347 {
348 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
349 if (pMdl)
350 {
351 MmBuildMdlForNonPagedPool(pMdl);
352#ifdef RT_ARCH_AMD64
353 if (fExecutable)
354 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
355#endif
356
357 /*
358 * Create the IPRT memory object.
359 */
360 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
361 if (pMemNt)
362 {
363 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
364 pMemNt->cMdls = 1;
365 pMemNt->apMdls[0] = pMdl;
366 *ppMem = &pMemNt->Core;
367 return VINF_SUCCESS;
368 }
369
370 rc = VERR_NO_MEMORY;
371 IoFreeMdl(pMdl);
372 }
373 ExFreePool(pv);
374 }
375 return rc;
376}
377
378
379/**
380 * Helper for rtR0MemObjNativeAllocLarge that verifies the result.
381 */
382static bool rtR0MemObjNtVerifyLargePageAlloc(PMDL pMdl, size_t cb, size_t cbLargePage)
383{
384 if (MmGetMdlByteCount(pMdl) >= cb)
385 {
386 PPFN_NUMBER const paPfns = MmGetMdlPfnArray(pMdl);
387 size_t const cPagesPerLargePage = cbLargePage >> PAGE_SHIFT;
388 size_t const cLargePages = cb / cbLargePage;
389 size_t iPage = 0;
390 for (size_t iLargePage = 0; iLargePage < cLargePages; iLargePage++)
391 {
392 PFN_NUMBER Pfn = paPfns[iPage];
393 if (!(Pfn & (cbLargePage >> PAGE_SHIFT) - 1U))
394 {
395 for (size_t iSubPage = 1; iSubPage < cPagesPerLargePage; iSubPage++)
396 {
397 iPage++;
398 Pfn++;
399 if (paPfns[iPage] == Pfn)
400 { /* likely */ }
401 else
402 {
403 Log(("rtR0MemObjNativeAllocLarge: Subpage %#zu in large page #%zu is not contiguous: %#x, expected %#x\n",
404 iSubPage, iLargePage, paPfns[iPage], Pfn));
405 return false;
406 }
407 }
408 }
409 else
410 {
411 Log(("rtR0MemObjNativeAllocLarge: Large page #%zu is misaligned: %#x, cbLargePage=%#zx\n",
412 iLargePage, Pfn, cbLargePage));
413 return false;
414 }
415 }
416 return true;
417 }
418 Log(("rtR0MemObjNativeAllocLarge: Got back too few pages: %#zx, requested %#zx\n", MmGetMdlByteCount(pMdl), cb));
419 return false;
420}
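
/*
 * Illustrative example (not part of the original file): for a 2 MiB large page,
 * cbLargePage >> PAGE_SHIFT is 512, so the check above requires the first PFN of
 * each large page to be a multiple of 512 (i.e. 2 MiB physical alignment) and the
 * remaining 511 PFNs of that large page to be consecutive.
 */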
421
422
423DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
424 const char *pszTag)
425{
426 /*
427 * Need the MmAllocatePagesForMdlEx function so we can specify flags.
428 */
429 if ( g_uRtNtVersion >= RTNT_MAKE_VERSION(6,1) /* Windows 7+ */
430 && g_pfnrtMmAllocatePagesForMdlEx
431 && g_pfnrtMmFreePagesFromMdl
432 && g_pfnrtMmMapLockedPagesSpecifyCache)
433 {
434 ULONG fNtFlags = MM_ALLOCATE_FULLY_REQUIRED /* W7+: Make it fail if we don't get all we ask for.*/
435 | MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS; /* W7+: The SkipBytes chunks must be physically contiguous. */
436 if ((fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST) && g_uRtNtVersion >= RTNT_MAKE_VERSION(6, 2))
437 fNtFlags |= MM_ALLOCATE_FAST_LARGE_PAGES; /* W8+: Don't try too hard, just fail if not enough handy. */
438
439 PHYSICAL_ADDRESS Zero;
440 Zero.QuadPart = 0;
441
442 PHYSICAL_ADDRESS HighAddr;
443 HighAddr.QuadPart = MAXLONGLONG;
444
445 PHYSICAL_ADDRESS Skip;
446 Skip.QuadPart = cbLargePage;
447
448 int rc;
449 PMDL const pMdl = g_pfnrtMmAllocatePagesForMdlEx(Zero, HighAddr, Skip, cb, MmCached, fNtFlags);
450 if (pMdl)
451 {
452 /* Verify the result. */
453 if (rtR0MemObjNtVerifyLargePageAlloc(pMdl, cb, cbLargePage))
454 {
455 /*
456 * Map the allocation into kernel space. Unless the memory is already mapped
457 * somewhere (which it actually seems to be), I guess it's unlikely that we'll get a
458 * large page aligned mapping back here...
459 */
460 __try
461 {
462 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
463 FALSE /* no bug check on failure */, NormalPagePriority);
464 if (pv)
465 {
466 /*
467 * Create the memory object.
468 */
469 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
470 if (pMemNt)
471 {
472 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
473 pMemNt->fAllocatedPagesForMdl = true;
474 pMemNt->cMdls = 1;
475 pMemNt->apMdls[0] = pMdl;
476 *ppMem = &pMemNt->Core;
477 return VINF_SUCCESS;
478 }
479
480 MmUnmapLockedPages(pv, pMdl);
481 }
482 }
483 __except(EXCEPTION_EXECUTE_HANDLER)
484 {
485#ifdef LOG_ENABLED
486 NTSTATUS rcNt = GetExceptionCode();
487 Log(("rtR0MemObjNativeAllocLarge: Exception Code %#x\n", rcNt));
488#endif
489 /* nothing */
490 }
491 }
492
493 g_pfnrtMmFreePagesFromMdl(pMdl);
494 ExFreePool(pMdl);
495 rc = VERR_NO_MEMORY;
496 }
497 else
498 rc = fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST ? VERR_TRY_AGAIN : VERR_NO_MEMORY;
499 return rc;
500 }
501
502 return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
503}
504
505
506DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
507{
508 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
509
510 /*
511 * Try see if we get lucky first...
512 * (We could probably just assume we're lucky on NT4.)
513 */
514 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable, pszTag);
515 if (RT_SUCCESS(rc))
516 {
517 size_t iPage = cb >> PAGE_SHIFT;
518 while (iPage-- > 0)
519 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
520 {
521 rc = VERR_NO_LOW_MEMORY;
522 break;
523 }
524 if (RT_SUCCESS(rc))
525 return rc;
526
527 /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
528 RTR0MemObjFree(*ppMem, false);
529 *ppMem = NULL;
530 }
531
532 /*
533 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
534 */
535 if ( g_pfnrtMmAllocatePagesForMdl
536 && g_pfnrtMmFreePagesFromMdl
537 && g_pfnrtMmMapLockedPagesSpecifyCache)
538 {
539 PHYSICAL_ADDRESS Zero;
540 Zero.QuadPart = 0;
541 PHYSICAL_ADDRESS HighAddr;
542 HighAddr.QuadPart = _4G - 1;
543 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
544 if (pMdl)
545 {
546 if (MmGetMdlByteCount(pMdl) >= cb)
547 {
548 __try
549 {
550 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
551 FALSE /* no bug check on failure */, NormalPagePriority);
552 if (pv)
553 {
554 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb, pszTag);
555 if (pMemNt)
556 {
557 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
558 pMemNt->fAllocatedPagesForMdl = true;
559 pMemNt->cMdls = 1;
560 pMemNt->apMdls[0] = pMdl;
561 *ppMem = &pMemNt->Core;
562 return VINF_SUCCESS;
563 }
564 MmUnmapLockedPages(pv, pMdl);
565 }
566 }
567 __except(EXCEPTION_EXECUTE_HANDLER)
568 {
569# ifdef LOG_ENABLED
570 NTSTATUS rcNt = GetExceptionCode();
571 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
572# endif
573 /* nothing */
574 }
575 }
576 g_pfnrtMmFreePagesFromMdl(pMdl);
577 ExFreePool(pMdl);
578 }
579 }
580
581 /*
582 * Fall back on contiguous memory...
583 */
584 return rtR0MemObjNativeAllocCont(ppMem, cb, _4G - 1, fExecutable, pszTag);
585}
586
587
588/**
589 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
590 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
591 * to what rtR0MemObjNativeAllocCont() does.
592 *
593 * @returns IPRT status code.
594 * @param ppMem Where to store the pointer to the ring-0 memory object.
595 * @param cb The size.
596 * @param fExecutable Whether the mapping should be executable or not.
597 * @param PhysHighest The highest physical address for the pages in allocation.
598 * @param uAlignment The alignment of the physical memory to allocate.
599 * Supported values are PAGE_SIZE, _2M, _4M and _1G.
600 * @param pszTag Allocation tag used for statistics and such.
601 */
602static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
603 size_t uAlignment, const char *pszTag)
604{
605 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
606 RT_NOREF1(fExecutable);
607
608 /*
609 * Allocate the memory and create an MDL for it.
610 */
611 PHYSICAL_ADDRESS PhysAddrHighest;
612 PhysAddrHighest.QuadPart = PhysHighest;
613 void *pv;
614 if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
615 {
616 PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
617 PhysAddrLowest.QuadPart = 0;
618 PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
619 pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
620 }
621 else if (uAlignment == PAGE_SIZE)
622 pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
623 else
624 return VERR_NOT_SUPPORTED;
625 if (!pv)
626 return VERR_NO_MEMORY;
627
628 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
629 if (pMdl)
630 {
631 MmBuildMdlForNonPagedPool(pMdl);
632#ifdef RT_ARCH_AMD64
633 if (fExecutable)
634 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
635#endif
636
637 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb, pszTag);
638 if (pMemNt)
639 {
640 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
641 pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
642 pMemNt->cMdls = 1;
643 pMemNt->apMdls[0] = pMdl;
644 *ppMem = &pMemNt->Core;
645 return VINF_SUCCESS;
646 }
647
648 IoFreeMdl(pMdl);
649 }
650 MmFreeContiguousMemory(pv);
651 return VERR_NO_MEMORY;
652}
653
654
655DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest,
656 bool fExecutable, const char *pszTag)
657{
658 return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, PhysHighest, PAGE_SIZE /* alignment */, pszTag);
659}
660
661
662DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
663 const char *pszTag)
664{
665 /*
666 * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
667 *
668 * This is preferable to using MmAllocateContiguousMemory because there are
669 * a few situations where the memory shouldn't be mapped, like for instance
670 * VT-x control memory. Since these are rather small allocations (one or
671 * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
672 * request.
673 *
674 * If the allocation is big, the chances are *probably* not very good. The
675 * current limit is kind of random...
676 */
677 if ( cb < _128K
678 && uAlignment == PAGE_SIZE
679 && g_pfnrtMmAllocatePagesForMdl
680 && g_pfnrtMmFreePagesFromMdl)
681 {
682 PHYSICAL_ADDRESS Zero;
683 Zero.QuadPart = 0;
684 PHYSICAL_ADDRESS HighAddr;
685 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
686 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
687 if (pMdl)
688 {
689 if (MmGetMdlByteCount(pMdl) >= cb)
690 {
691 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
692 PFN_NUMBER Pfn = paPfns[0] + 1;
693 const size_t cPages = cb >> PAGE_SHIFT;
694 size_t iPage;
695 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
696 if (paPfns[iPage] != Pfn)
697 break;
698 if (iPage >= cPages)
699 {
700 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
701 if (pMemNt)
702 {
703 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
704 pMemNt->Core.u.Phys.fAllocated = true;
705 pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
706 pMemNt->fAllocatedPagesForMdl = true;
707 pMemNt->cMdls = 1;
708 pMemNt->apMdls[0] = pMdl;
709 *ppMem = &pMemNt->Core;
710 return VINF_SUCCESS;
711 }
712 }
713 }
714 g_pfnrtMmFreePagesFromMdl(pMdl);
715 ExFreePool(pMdl);
716 }
717 }
718
719 return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment, pszTag);
720}
721
722
723DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
724{
725 if (g_pfnrtMmAllocatePagesForMdl && g_pfnrtMmFreePagesFromMdl)
726 {
727 /** @todo use the Ex version with the fail-if-not-all-requested-pages flag
728 * when possible. */
729 PHYSICAL_ADDRESS Zero;
730 Zero.QuadPart = 0;
731 PHYSICAL_ADDRESS HighAddr;
732 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
733 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
734 if (pMdl)
735 {
736 if (MmGetMdlByteCount(pMdl) >= cb)
737 {
738 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
739 if (pMemNt)
740 {
741 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
742 pMemNt->fAllocatedPagesForMdl = true;
743 pMemNt->cMdls = 1;
744 pMemNt->apMdls[0] = pMdl;
745 *ppMem = &pMemNt->Core;
746 return VINF_SUCCESS;
747 }
748 }
749 g_pfnrtMmFreePagesFromMdl(pMdl);
750 ExFreePool(pMdl);
751 }
752 return VERR_NO_MEMORY;
753 }
754 return VERR_NOT_SUPPORTED;
755}
756
757
758DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
759 const char *pszTag)
760{
761 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);
762
763 /*
764 * Validate the address range and create a descriptor for it.
765 */
766 PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
767 if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
768 return VERR_ADDRESS_TOO_BIG;
769
770 /*
771 * Create the IPRT memory object.
772 */
773 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
774 if (pMemNt)
775 {
776 pMemNt->Core.u.Phys.PhysBase = Phys;
777 pMemNt->Core.u.Phys.fAllocated = false;
778 pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
779 *ppMem = &pMemNt->Core;
780 return VINF_SUCCESS;
781 }
782 return VERR_NO_MEMORY;
783}
784
785
786/**
787 * Internal worker for locking down pages.
788 *
789 * @return IPRT status code.
790 *
791 * @param ppMem Where to store the memory object pointer.
792 * @param pv First page.
793 * @param cb Number of bytes.
794 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
795 * and RTMEM_PROT_WRITE.
796 * @param R0Process The process \a pv and \a cb refers to.
797 * @param pszTag Allocation tag used for statistics and such.
798 */
799static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process,
800 const char *pszTag)
801{
802 /*
803 * Calc the number of MDLs we need and allocate the memory object structure.
804 */
805 size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
806 if (cb % MAX_LOCK_MEM_SIZE)
807 cMdls++;
808 if (cMdls >= UINT32_MAX)
809 return VERR_OUT_OF_RANGE;
810 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
811 RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
812 if (!pMemNt)
813 return VERR_NO_MEMORY;
814
815 /*
816 * Loop locking down the sub parts of the memory.
817 */
818 int rc = VINF_SUCCESS;
819 size_t cbTotal = 0;
820 uint8_t *pb = (uint8_t *)pv;
821 uint32_t iMdl;
822 for (iMdl = 0; iMdl < cMdls; iMdl++)
823 {
824 /*
825 * Calc the Mdl size and allocate it.
826 */
827 size_t cbCur = cb - cbTotal;
828 if (cbCur > MAX_LOCK_MEM_SIZE)
829 cbCur = MAX_LOCK_MEM_SIZE;
830 AssertMsg(cbCur, ("cbCur: 0!\n"));
831 PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
832 if (!pMdl)
833 {
834 rc = VERR_NO_MEMORY;
835 break;
836 }
837
838 /*
839 * Lock the pages.
840 */
841 __try
842 {
843 MmProbeAndLockPages(pMdl,
844 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
845 fAccess == RTMEM_PROT_READ
846 ? IoReadAccess
847 : fAccess == RTMEM_PROT_WRITE
848 ? IoWriteAccess
849 : IoModifyAccess);
850
851 pMemNt->apMdls[iMdl] = pMdl;
852 pMemNt->cMdls++;
853 }
854 __except(EXCEPTION_EXECUTE_HANDLER)
855 {
856 IoFreeMdl(pMdl);
857 rc = VERR_LOCK_FAILED;
858 break;
859 }
860
861 if ( R0Process != NIL_RTR0PROCESS
862 && g_pfnrtMmSecureVirtualMemory
863 && g_pfnrtMmUnsecureVirtualMemory)
864 {
865 /* Make sure the user process can't change the allocation. */
866 pMemNt->pvSecureMem = g_pfnrtMmSecureVirtualMemory(pv, cb,
867 fAccess & RTMEM_PROT_WRITE
868 ? PAGE_READWRITE
869 : PAGE_READONLY);
870 if (!pMemNt->pvSecureMem)
871 {
872 rc = VERR_NO_MEMORY;
873 break;
874 }
875 }
876
877 /* next */
878 cbTotal += cbCur;
879 pb += cbCur;
880 }
881 if (RT_SUCCESS(rc))
882 {
883 Assert(pMemNt->cMdls == cMdls);
884 pMemNt->Core.u.Lock.R0Process = R0Process;
885 *ppMem = &pMemNt->Core;
886 return rc;
887 }
888
889 /*
890 * We failed, perform cleanups.
891 */
892 while (iMdl-- > 0)
893 {
894 MmUnlockPages(pMemNt->apMdls[iMdl]);
895 IoFreeMdl(pMemNt->apMdls[iMdl]);
896 pMemNt->apMdls[iMdl] = NULL;
897 }
898 if (pMemNt->pvSecureMem)
899 {
900 if (g_pfnrtMmUnsecureVirtualMemory)
901 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
902 pMemNt->pvSecureMem = NULL;
903 }
904
905 rtR0MemObjDelete(&pMemNt->Core);
906 return rc;
907}
908
909
910DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
911 RTR0PROCESS R0Process, const char *pszTag)
912{
913 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
914 /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
915 return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process, pszTag);
916}
917
918
919DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
920{
921 return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS, pszTag);
922}
923
924
925DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
926 const char *pszTag)
927{
928 /*
929 * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
930 * Or MmAllocateMappingAddress?
931 */
932 RT_NOREF(ppMem, pvFixed, cb, uAlignment, pszTag);
933 return VERR_NOT_SUPPORTED;
934}
935
936
937DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
938 RTR0PROCESS R0Process, const char *pszTag)
939{
940 /*
941 * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
942 */
943 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
944 return VERR_NOT_SUPPORTED;
945}
946
947
948/**
949 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
950 *
951 * @returns IPRT status code.
952 * @param ppMem Where to store the memory object for the mapping.
953 * @param pMemToMap The memory object to map.
954 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
955 * @param uAlignment The alignment requirement for the mapping.
956 * @param fProt The desired page protection for the mapping.
957 * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
958 * If not nil, it's the current process.
959 * @param offSub Offset into @a pMemToMap to start mapping.
960 * @param cbSub The number of bytes to map from @a pMapToMem. 0 if
961 * we're to map everything. Non-zero if @a offSub is
962 * non-zero.
963 * @param pszTag Allocation tag used for statistics and such.
964 */
965static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
966 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
967{
968 int rc = VERR_MAP_FAILED;
969
970 /*
971 * Check that the specified alignment is supported.
972 */
973 if (uAlignment > PAGE_SIZE)
974 return VERR_NOT_SUPPORTED;
975
976 /*
977 * There are two basic cases here, either we've got an MDL and can
978 * map it using MmMapLockedPages, or we've got a contiguous physical
979 * range (MMIO most likely) and can use MmMapIoSpace.
980 */
981 PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
982 if (pMemNtToMap->cMdls)
983 {
984 /* don't attempt to map locked regions with more than one MDL. */
985 if (pMemNtToMap->cMdls != 1)
986 return VERR_NOT_SUPPORTED;
987
988 /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
989 if (pvFixed != (void *)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
990 return VERR_NOT_SUPPORTED;
991
992 /* we can't map anything to the first page, sorry. */
993 if (pvFixed == 0)
994 return VERR_NOT_SUPPORTED;
995
996 /* only one system mapping for now - no time to figure out MDL restrictions right now. */
997 if ( pMemNtToMap->Core.uRel.Parent.cMappings
998 && R0Process == NIL_RTR0PROCESS)
999 {
1000 if (pMemNtToMap->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
1001 return VERR_NOT_SUPPORTED;
1002 uint32_t iMapping = pMemNtToMap->Core.uRel.Parent.cMappings;
1003 while (iMapping-- > 0)
1004 {
1005 PRTR0MEMOBJNT pMapping = (PRTR0MEMOBJNT)pMemNtToMap->Core.uRel.Parent.papMappings[iMapping];
1006 if ( pMapping->Core.enmType != RTR0MEMOBJTYPE_MAPPING
1007 || pMapping->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
1008 return VERR_NOT_SUPPORTED;
1009 }
1010 }
1011
1012 /* Create a partial MDL if this is a sub-range request. */
1013 PMDL pMdl;
1014 if (!offSub && !cbSub)
1015 pMdl = pMemNtToMap->apMdls[0];
1016 else
1017 {
1018 pMdl = IoAllocateMdl(NULL, (ULONG)cbSub, FALSE, FALSE, NULL);
1019 if (pMdl)
1020 IoBuildPartialMdl(pMemNtToMap->apMdls[0], pMdl,
1021 (uint8_t *)MmGetMdlVirtualAddress(pMemNtToMap->apMdls[0]) + offSub, (ULONG)cbSub);
1022 else
1023 {
1024 IoFreeMdl(pMdl);
1025 return VERR_NO_MEMORY;
1026 }
1027 }
1028
1029 __try
1030 {
1031 /** @todo uAlignment */
1032 /** @todo How to set the protection on the pages? */
1033 void *pv;
1034 if (g_pfnrtMmMapLockedPagesSpecifyCache)
1035 pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl,
1036 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
1037 MmCached,
1038 pvFixed != (void *)-1 ? pvFixed : NULL,
1039 FALSE /* no bug check on failure */,
1040 NormalPagePriority);
1041 else
1042 pv = MmMapLockedPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
1043 if (pv)
1044 {
1045 NOREF(fProt);
1046
1047 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew( !offSub && !cbSub
1048 ? sizeof(*pMemNt) : RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[1]),
1049 RTR0MEMOBJTYPE_MAPPING, pv, pMemNtToMap->Core.cb, pszTag);
1050 if (pMemNt)
1051 {
1052 pMemNt->Core.u.Mapping.R0Process = R0Process;
1053 if (!offSub && !cbSub)
1054 pMemNt->fSubMapping = false;
1055 else
1056 {
1057 pMemNt->apMdls[0] = pMdl;
1058 pMemNt->cMdls = 1;
1059 pMemNt->fSubMapping = true;
1060 }
1061
1062 *ppMem = &pMemNt->Core;
1063 return VINF_SUCCESS;
1064 }
1065
1066 rc = VERR_NO_MEMORY;
1067 MmUnmapLockedPages(pv, pMdl);
1068 }
1069 }
1070 __except(EXCEPTION_EXECUTE_HANDLER)
1071 {
1072#ifdef LOG_ENABLED
1073 NTSTATUS rcNt = GetExceptionCode();
1074 Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
1075#endif
1076
1077 /* nothing */
1078 rc = VERR_MAP_FAILED;
1079 }
1080
1081 }
1082 else
1083 {
1084 AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
1085 && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
1086
1087 /* cannot map phys mem to user space (yet). */
1088 if (R0Process != NIL_RTR0PROCESS)
1089 return VERR_NOT_SUPPORTED;
1090
1091 /* Cannot sub-map these (yet). */
1092 AssertMsgReturn(!offSub && !cbSub, ("%#zx %#zx\n", offSub, cbSub), VERR_NOT_SUPPORTED);
1093
1094
1095 /** @todo uAlignment */
1096 /** @todo How to set the protection on the pages? */
1097 PHYSICAL_ADDRESS Phys;
1098 Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
1099 void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
1100 pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
1101 if (pv)
1102 {
1103 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
1104 pMemNtToMap->Core.cb, pszTag);
1105 if (pMemNt)
1106 {
1107 pMemNt->Core.u.Mapping.R0Process = R0Process;
1108 *ppMem = &pMemNt->Core;
1109 return VINF_SUCCESS;
1110 }
1111
1112 rc = VERR_NO_MEMORY;
1113 MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
1114 }
1115 }
1116
1117 NOREF(uAlignment); NOREF(fProt);
1118 return rc;
1119}
1120
1121
1122DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
1123 unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
1124{
1125 return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS, offSub, cbSub, pszTag);
1126}
1127
1128
1129DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1130 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
1131{
1132 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
1133 return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub, pszTag);
1134}
1135
1136
1137DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1138{
1139#if 0
1140 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
1141#endif
1142
1143 /*
1144 * Seems there are some issues with this MmProtectMdlSystemAddress API, so
1145 * this code isn't currently enabled until we've tested it with the verifier.
1146 */
1147#if 0
1148 /*
1149 * The API we've got requires a kernel mapping.
1150 */
1151 if ( pMemNt->cMdls
1152 && g_pfnrtMmProtectMdlSystemAddress
1153 && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
1154 && pMemNt->Core.pv != NULL
1155 && ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
1156 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
1157 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
1158 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
1159 && pMemNt->Core.u.Lock.R0Process == NIL_RTPROCESS)
1160 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
1161 && pMemNt->Core.u.Mapping.R0Process == NIL_RTPROCESS) ) )
1162 {
1163 /* Convert the protection. */
1164 LOCK_OPERATION enmLockOp;
1165 ULONG fAccess;
1166 switch (fProt)
1167 {
1168 case RTMEM_PROT_NONE:
1169 fAccess = PAGE_NOACCESS;
1170 enmLockOp = IoReadAccess;
1171 break;
1172 case RTMEM_PROT_READ:
1173 fAccess = PAGE_READONLY;
1174 enmLockOp = IoReadAccess;
1175 break;
1176 case RTMEM_PROT_WRITE:
1177 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
1178 fAccess = PAGE_READWRITE;
1179 enmLockOp = IoModifyAccess;
1180 break;
1181 case RTMEM_PROT_EXEC:
1182 fAccess = PAGE_EXECUTE;
1183 enmLockOp = IoReadAccess;
1184 break;
1185 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
1186 fAccess = PAGE_EXECUTE_READ;
1187 enmLockOp = IoReadAccess;
1188 break;
1189 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
1190 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
1191 fAccess = PAGE_EXECUTE_READWRITE;
1192 enmLockOp = IoModifyAccess;
1193 break;
1194 default:
1195 AssertFailedReturn(VERR_INVALID_FLAGS);
1196 }
1197
1198 NTSTATUS rcNt = STATUS_SUCCESS;
1199# if 0 /** @todo test this against the verifier. */
1200 if (offSub == 0 && pMemNt->Core.cb == cbSub)
1201 {
1202 uint32_t iMdl = pMemNt->cMdls;
1203 while (iMdl-- > 0)
1204 {
1205 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[i], fAccess);
1206 if (!NT_SUCCESS(rcNt))
1207 break;
1208 }
1209 }
1210 else
1211# endif
1212 {
1213 /*
1214 * We ASSUME the following here:
1215 * - MmProtectMdlSystemAddress can deal with nonpaged pool memory
1216 * - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
1217 * - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
1218 * exact same ranges prior to freeing them.
1219 *
1220 * So, we lock the pages temporarily, call the API and unlock them.
1221 */
1222 uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
1223 while (cbSub > 0 && NT_SUCCESS(rcNt))
1224 {
1225 size_t cbCur = cbSub;
1226 if (cbCur > MAX_LOCK_MEM_SIZE)
1227 cbCur = MAX_LOCK_MEM_SIZE;
1228 PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
1229 if (pMdl)
1230 {
1231 __try
1232 {
1233 MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
1234 }
1235 __except(EXCEPTION_EXECUTE_HANDLER)
1236 {
1237 rcNt = GetExceptionCode();
1238 }
1239 if (NT_SUCCESS(rcNt))
1240 {
1241 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
1242 MmUnlockPages(pMdl);
1243 }
1244 IoFreeMdl(pMdl);
1245 }
1246 else
1247 rcNt = STATUS_NO_MEMORY;
1248 pbCur += cbCur;
1249 cbSub -= cbCur;
1250 }
1251 }
1252
1253 if (NT_SUCCESS(rcNt))
1254 return VINF_SUCCESS;
1255 return RTErrConvertFromNtStatus(rcNt);
1256 }
1257#else
1258 RT_NOREF4(pMem, offSub, cbSub, fProt);
1259#endif
1260
1261 return VERR_NOT_SUPPORTED;
1262}
1263
1264
1265DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1266{
1267 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
1268
1269 if (pMemNt->cMdls)
1270 {
1271 if (pMemNt->cMdls == 1)
1272 {
1273 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
1274 return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
1275 }
1276
1277 size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
1278 size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
1279 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
1280 return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
1281 }
1282
1283 switch (pMemNt->Core.enmType)
1284 {
1285 case RTR0MEMOBJTYPE_MAPPING:
1286 return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
1287
1288 case RTR0MEMOBJTYPE_PHYS:
1289 return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
1290
1291 case RTR0MEMOBJTYPE_PAGE:
1292 case RTR0MEMOBJTYPE_PHYS_NC:
1293 case RTR0MEMOBJTYPE_LOW:
1294 case RTR0MEMOBJTYPE_CONT:
1295 case RTR0MEMOBJTYPE_LOCK:
1296 default:
1297 AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
1298 case RTR0MEMOBJTYPE_RES_VIRT:
1299 return NIL_RTHCPHYS;
1300 }
1301}
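
/*
 * Illustrative arithmetic (not part of the original file): for a multi-MDL lock
 * object on AMD64 (MAX_LOCK_MEM_SIZE = 24 MiB, i.e. 6144 pages per MDL), page
 * index 10000 yields iMdl = 10000 / 6144 = 1 and iMdlPfn = 10000 % 6144 = 3856,
 * so the physical address comes from PFN entry 3856 of the second MDL.
 */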
1302
1303
1304DECLHIDDEN(int) rtR0MemObjNativeZeroInitWithoutMapping(PRTR0MEMOBJINTERNAL pMem)
1305{
1306 RT_NOREF(pMem);
1307 return VERR_NOT_IMPLEMENTED;
1308}
1309