VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@100357

Last change on this file since 100357 was 100357, checked in by vboxsync, 18 months ago

Runtime/RTR0MemObj*: Add PhysHighest parameter to RTR0MemObjAllocCont to indicate the maximum allowed physical address for an allocation, bugref:10457 [second attempt]
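A rough usage sketch (illustrative only; the exact public wrapper is declared in include/iprt/memobj.h) of a caller passing the new physical-address ceiling:

    RTR0MEMOBJ hMemObj;
    /* Ask for 64 KB of physically contiguous memory below 4 GB; NIL_RTHCPHYS would mean "no limit". */
    int rc = RTR0MemObjAllocCont(&hMemObj, _64K, _4G - 1, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
        RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);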

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 46.9 KB
1/* $Id: memobj-r0drv-nt.cpp 100357 2023-07-04 07:00:26Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include "the-nt-kernel.h"
42
43#include <iprt/memobj.h>
44#include <iprt/alloc.h>
45#include <iprt/assert.h>
46#include <iprt/err.h>
47#include <iprt/log.h>
48#include <iprt/param.h>
49#include <iprt/string.h>
50#include <iprt/process.h>
51#include "internal/memobj.h"
52#include "internal-r0drv-nt.h"
53
54
55/*********************************************************************************************************************************
56* Defined Constants And Macros *
57*********************************************************************************************************************************/
58/** Maximum number of bytes we try to lock down in one go.
59 * This is supposed to have a limit just below 256MB, but in practice the
60 * limit appears to be much lower. The values here were determined experimentally.
61 */
62#ifdef RT_ARCH_X86
63# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
64#endif
65#ifdef RT_ARCH_AMD64
66# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
67#endif
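/* Illustration (added note, not part of the original source): rtR0MemObjNtLock() below splits a
 * lock request into ceil(cb / MAX_LOCK_MEM_SIZE) MDLs, so e.g. a 100MB request on AMD64 with the
 * 24MB chunk size above needs 100/24 = 4 full chunks plus 1 partial chunk = 5 MDLs. */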
68
69/* Newer WDK constants: */
70#ifndef MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS
71# define MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS 0x20
72#endif
73#ifndef MM_ALLOCATE_FAST_LARGE_PAGES
74# define MM_ALLOCATE_FAST_LARGE_PAGES 0x40
75#endif
76
77
78/*********************************************************************************************************************************
79* Structures and Typedefs *
80*********************************************************************************************************************************/
81/**
82 * The NT version of the memory object structure.
83 */
84typedef struct RTR0MEMOBJNT
85{
86 /** The core structure. */
87 RTR0MEMOBJINTERNAL Core;
88 /** Used MmAllocatePagesForMdl(). */
89 bool fAllocatedPagesForMdl;
90 /** Set if this is sub-section of the parent. */
91 bool fSubMapping;
92 /** Pointer returned by MmSecureVirtualMemory */
93 PVOID pvSecureMem;
94 /** The number of PMDLs (memory descriptor lists) in the array. */
95 uint32_t cMdls;
96 /** Array of MDL pointers. (variable size) */
97 PMDL apMdls[1];
98} RTR0MEMOBJNT;
99/** Pointer to the NT version of the memory object structure. */
100typedef RTR0MEMOBJNT *PRTR0MEMOBJNT;
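/* Example (added for illustration): apMdls[] is a variable-size trailing array, so objects that
 * need more than one MDL are allocated with the dynamic-offset macro rather than sizeof():
 *
 *     PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
 *                                                         RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
 *
 * Single-MDL objects simply use sizeof(*pMemNt), which already includes apMdls[1]. */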
101
102
103
104DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
105{
106 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
107
108 /*
109 * Deal with it on a per type basis (just as a variation).
110 */
111 switch (pMemNt->Core.enmType)
112 {
113 case RTR0MEMOBJTYPE_LOW:
114 if (pMemNt->fAllocatedPagesForMdl)
115 {
116 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
117 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
118 pMemNt->Core.pv = NULL;
119 if (pMemNt->pvSecureMem)
120 {
121 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
122 pMemNt->pvSecureMem = NULL;
123 }
124
125 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
126 ExFreePool(pMemNt->apMdls[0]);
127 pMemNt->apMdls[0] = NULL;
128 pMemNt->cMdls = 0;
129 break;
130 }
131 AssertFailed();
132 break;
133
134 case RTR0MEMOBJTYPE_PAGE:
135 Assert(pMemNt->Core.pv);
136 if (pMemNt->fAllocatedPagesForMdl)
137 {
138 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
139 Assert(pMemNt->pvSecureMem == NULL);
140 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
141 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
142 ExFreePool(pMemNt->apMdls[0]);
143 }
144 else
145 {
146 if (g_pfnrtExFreePoolWithTag)
147 g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
148 else
149 ExFreePool(pMemNt->Core.pv);
150
151 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
152 IoFreeMdl(pMemNt->apMdls[0]);
153 }
154 pMemNt->Core.pv = NULL;
155 pMemNt->apMdls[0] = NULL;
156 pMemNt->cMdls = 0;
157 break;
158
159 case RTR0MEMOBJTYPE_CONT:
160 Assert(pMemNt->Core.pv);
161 MmFreeContiguousMemory(pMemNt->Core.pv);
162 pMemNt->Core.pv = NULL;
163
164 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
165 IoFreeMdl(pMemNt->apMdls[0]);
166 pMemNt->apMdls[0] = NULL;
167 pMemNt->cMdls = 0;
168 break;
169
170 case RTR0MEMOBJTYPE_PHYS:
171 /* rtR0MemObjNativeEnterPhys? */
172 if (!pMemNt->Core.u.Phys.fAllocated)
173 {
174 Assert(!pMemNt->fAllocatedPagesForMdl);
175 /* Nothing to do here. */
176 break;
177 }
178 RT_FALL_THRU();
179
180 case RTR0MEMOBJTYPE_PHYS_NC:
181 if (pMemNt->fAllocatedPagesForMdl)
182 {
183 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
184 ExFreePool(pMemNt->apMdls[0]);
185 pMemNt->apMdls[0] = NULL;
186 pMemNt->cMdls = 0;
187 break;
188 }
189 AssertFailed();
190 break;
191
192 case RTR0MEMOBJTYPE_LOCK:
193 if (pMemNt->pvSecureMem)
194 {
195 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
196 pMemNt->pvSecureMem = NULL;
197 }
198 for (uint32_t i = 0; i < pMemNt->cMdls; i++)
199 {
200 MmUnlockPages(pMemNt->apMdls[i]);
201 IoFreeMdl(pMemNt->apMdls[i]);
202 pMemNt->apMdls[i] = NULL;
203 }
204 break;
205
206 case RTR0MEMOBJTYPE_RES_VIRT:
207/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
208 {
209 }
210 else
211 {
212 }*/
213 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
214 return VERR_INTERNAL_ERROR;
215 break;
216
217 case RTR0MEMOBJTYPE_MAPPING:
218 {
219 PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
220 Assert(pMemNtParent);
221 Assert(pMemNt->Core.pv);
222 Assert((pMemNt->cMdls == 0 && !pMemNt->fSubMapping) || (pMemNt->cMdls == 1 && pMemNt->fSubMapping));
223 if (pMemNtParent->cMdls)
224 {
225 Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
226 Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
227 || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
228 if (!pMemNt->cMdls)
229 MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
230 else
231 {
232 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
233 IoFreeMdl(pMemNt->apMdls[0]);
234 pMemNt->apMdls[0] = NULL;
235 }
236 }
237 else
238 {
239 Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
240 && !pMemNtParent->Core.u.Phys.fAllocated);
241 Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
242 Assert(!pMemNt->fSubMapping);
243 MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
244 }
245 pMemNt->Core.pv = NULL;
246 break;
247 }
248
249 default:
250 AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
251 return VERR_INTERNAL_ERROR;
252 }
253
254 return VINF_SUCCESS;
255}
256
257
258DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
259{
260 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
261 RT_NOREF1(fExecutable);
262
263 /*
264 * Use MmAllocatePagesForMdl if the allocation is a little bit big.
265 */
266 int rc = VERR_NO_PAGE_MEMORY;
267 if ( cb > _1M
268 && g_pfnrtMmAllocatePagesForMdl
269 && g_pfnrtMmFreePagesFromMdl
270 && g_pfnrtMmMapLockedPagesSpecifyCache)
271 {
272 PHYSICAL_ADDRESS Zero;
273 Zero.QuadPart = 0;
274 PHYSICAL_ADDRESS HighAddr;
275 HighAddr.QuadPart = MAXLONGLONG;
276 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
277 if (pMdl)
278 {
279 if (MmGetMdlByteCount(pMdl) >= cb)
280 {
281 __try
282 {
283 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
284 FALSE /* no bug check on failure */, NormalPagePriority);
285 if (pv)
286 {
287#ifdef RT_ARCH_AMD64
288 if (fExecutable)
289 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
290#endif
291
292 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
293 if (pMemNt)
294 {
295 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
296 pMemNt->fAllocatedPagesForMdl = true;
297 pMemNt->cMdls = 1;
298 pMemNt->apMdls[0] = pMdl;
299 *ppMem = &pMemNt->Core;
300 return VINF_SUCCESS;
301 }
302 MmUnmapLockedPages(pv, pMdl);
303 }
304 }
305 __except(EXCEPTION_EXECUTE_HANDLER)
306 {
307# ifdef LOG_ENABLED
308 NTSTATUS rcNt = GetExceptionCode();
309 Log(("rtR0MemObjNativeAllocPage: Exception Code %#x\n", rcNt));
310# endif
311 /* nothing */
312 }
313 }
314 g_pfnrtMmFreePagesFromMdl(pMdl);
315 ExFreePool(pMdl);
316 }
317 }
318
319 /*
320 * Try to allocate the memory and create an MDL for it so
321 * we can query the physical addresses and do mappings later
322 * without running into out-of-memory conditions and similar problems.
323 */
324 void *pv;
325 if (g_pfnrtExAllocatePoolWithTag)
326 pv = g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
327 else
328 pv = ExAllocatePool(NonPagedPool, cb);
329 if (pv)
330 {
331 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
332 if (pMdl)
333 {
334 MmBuildMdlForNonPagedPool(pMdl);
335#ifdef RT_ARCH_AMD64
336 if (fExecutable)
337 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
338#endif
339
340 /*
341 * Create the IPRT memory object.
342 */
343 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
344 if (pMemNt)
345 {
346 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
347 pMemNt->cMdls = 1;
348 pMemNt->apMdls[0] = pMdl;
349 *ppMem = &pMemNt->Core;
350 return VINF_SUCCESS;
351 }
352
353 rc = VERR_NO_MEMORY;
354 IoFreeMdl(pMdl);
355 }
356 ExFreePool(pv);
357 }
358 return rc;
359}
360
361
362/**
363 * Helper for rtR0MemObjNativeAllocLarge that verifies the result.
364 */
365static bool rtR0MemObjNtVerifyLargePageAlloc(PMDL pMdl, size_t cb, size_t cbLargePage)
366{
367 if (MmGetMdlByteCount(pMdl) >= cb)
368 {
369 PPFN_NUMBER const paPfns = MmGetMdlPfnArray(pMdl);
370 size_t const cPagesPerLargePage = cbLargePage >> PAGE_SHIFT;
371 size_t const cLargePages = cb / cbLargePage;
372 size_t iPage = 0;
373 for (size_t iLargePage = 0; iLargePage < cLargePages; iLargePage++)
374 {
375 PFN_NUMBER Pfn = paPfns[iPage];
376 if (!(Pfn & (cbLargePage >> PAGE_SHIFT) - 1U))
377 {
378 for (size_t iSubPage = 1; iSubPage < cPagesPerLargePage; iSubPage++)
379 {
380 iPage++;
381 Pfn++;
382 if (paPfns[iPage] == Pfn)
383 { /* likely */ }
384 else
385 {
386 Log(("rtR0MemObjNativeAllocLarge: Subpage %#zu in large page #%zu is not contiguous: %#x, expected %#x\n",
387 iSubPage, iLargePage, paPfns[iPage], Pfn));
388 return false;
389 }
390 }
391 }
392 else
393 {
394 Log(("rtR0MemObjNativeAllocLarge: Large page #%zu is misaligned: %#x, cbLargePage=%#zx\n",
395 iLargePage, Pfn, cbLargePage));
396 return false;
397 }
398 }
399 return true;
400 }
401 Log(("rtR0MemObjNativeAllocLarge: Got back too few pages: %#zx, requested %#zx\n", MmGetMdlByteCount(pMdl), cb));
402 return false;
403}
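/* Worked example (added note): with cbLargePage = _2M and 4KB pages, cPagesPerLargePage is 512, so
 * the helper above requires each large page's first PFN to be 512-aligned and the following 511
 * PFNs to increase by exactly one. */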
404
405
406DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
407 const char *pszTag)
408{
409 /*
410 * Need the MmAllocatePagesForMdlEx function so we can specify flags.
411 */
412 if ( g_uRtNtVersion >= RTNT_MAKE_VERSION(6,1) /* Windows 7+ */
413 && g_pfnrtMmAllocatePagesForMdlEx
414 && g_pfnrtMmFreePagesFromMdl
415 && g_pfnrtMmMapLockedPagesSpecifyCache)
416 {
417 ULONG fNtFlags = MM_ALLOCATE_FULLY_REQUIRED /* W7+: Make it fail if we don't get all we ask for.*/
418 | MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS; /* W7+: The SkipBytes chunks must be physically contiguous. */
419 if ((fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST) && g_uRtNtVersion >= RTNT_MAKE_VERSION(6, 2))
420 fNtFlags |= MM_ALLOCATE_FAST_LARGE_PAGES; /* W8+: Don't try too hard, just fail if not enough handy. */
421
422 PHYSICAL_ADDRESS Zero;
423 Zero.QuadPart = 0;
424
425 PHYSICAL_ADDRESS HighAddr;
426 HighAddr.QuadPart = MAXLONGLONG;
427
428 PHYSICAL_ADDRESS Skip;
429 Skip.QuadPart = cbLargePage;
430
431 int rc;
432 PMDL const pMdl = g_pfnrtMmAllocatePagesForMdlEx(Zero, HighAddr, Skip, cb, MmCached, fNtFlags);
433 if (pMdl)
434 {
435 /* Verify the result. */
436 if (rtR0MemObjNtVerifyLargePageAlloc(pMdl, cb, cbLargePage))
437 {
438 /*
439 * Map the allocation into kernel space. Unless the memory is already mapped
440 * somewhere already (which it appears to be), it's unlikely that we'll get a
441 * large page aligned mapping back here...
442 */
443 __try
444 {
445 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
446 FALSE /* no bug check on failure */, NormalPagePriority);
447 if (pv)
448 {
449 /*
450 * Create the memory object.
451 */
452 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
453 if (pMemNt)
454 {
455 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
456 pMemNt->fAllocatedPagesForMdl = true;
457 pMemNt->cMdls = 1;
458 pMemNt->apMdls[0] = pMdl;
459 *ppMem = &pMemNt->Core;
460 return VINF_SUCCESS;
461 }
462
463 MmUnmapLockedPages(pv, pMdl);
464 }
465 }
466 __except(EXCEPTION_EXECUTE_HANDLER)
467 {
468#ifdef LOG_ENABLED
469 NTSTATUS rcNt = GetExceptionCode();
470 Log(("rtR0MemObjNativeAllocLarge: Exception Code %#x\n", rcNt));
471#endif
472 /* nothing */
473 }
474 }
475
476 g_pfnrtMmFreePagesFromMdl(pMdl);
477 ExFreePool(pMdl);
478 rc = VERR_NO_MEMORY;
479 }
480 else
481 rc = fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST ? VERR_TRY_AGAIN : VERR_NO_MEMORY;
482 return rc;
483 }
484
485 return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
486}
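/* Usage sketch (illustrative; assumes the public wrapper in iprt/memobj.h mirrors the native
 * signature above):
 *
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocLarge(&hMemObj, _4M, _2M, RTMEMOBJ_ALLOC_LARGE_F_FAST);
 *     if (rc == VERR_TRY_AGAIN)
 *         rc = RTR0MemObjAllocPage(&hMemObj, _4M, false); /* no large pages handy right now */
 *
 * With RTMEMOBJ_ALLOC_LARGE_F_FAST the call fails quickly with VERR_TRY_AGAIN (see above) rather
 * than letting the kernel search hard for free large pages. */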
487
488
489DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
490{
491 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
492
493 /*
494 * Try and see if we get lucky first...
495 * (We could probably just assume we're lucky on NT4.)
496 */
497 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable, pszTag);
498 if (RT_SUCCESS(rc))
499 {
500 size_t iPage = cb >> PAGE_SHIFT;
501 while (iPage-- > 0)
502 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
503 {
504 rc = VERR_NO_LOW_MEMORY;
505 break;
506 }
507 if (RT_SUCCESS(rc))
508 return rc;
509
510 /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
511 RTR0MemObjFree(*ppMem, false);
512 *ppMem = NULL;
513 }
514
515 /*
516 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
517 */
518 if ( g_pfnrtMmAllocatePagesForMdl
519 && g_pfnrtMmFreePagesFromMdl
520 && g_pfnrtMmMapLockedPagesSpecifyCache)
521 {
522 PHYSICAL_ADDRESS Zero;
523 Zero.QuadPart = 0;
524 PHYSICAL_ADDRESS HighAddr;
525 HighAddr.QuadPart = _4G - 1;
526 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
527 if (pMdl)
528 {
529 if (MmGetMdlByteCount(pMdl) >= cb)
530 {
531 __try
532 {
533 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
534 FALSE /* no bug check on failure */, NormalPagePriority);
535 if (pv)
536 {
537 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb, pszTag);
538 if (pMemNt)
539 {
540 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
541 pMemNt->fAllocatedPagesForMdl = true;
542 pMemNt->cMdls = 1;
543 pMemNt->apMdls[0] = pMdl;
544 *ppMem = &pMemNt->Core;
545 return VINF_SUCCESS;
546 }
547 MmUnmapLockedPages(pv, pMdl);
548 }
549 }
550 __except(EXCEPTION_EXECUTE_HANDLER)
551 {
552# ifdef LOG_ENABLED
553 NTSTATUS rcNt = GetExceptionCode();
554 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
555# endif
556 /* nothing */
557 }
558 }
559 g_pfnrtMmFreePagesFromMdl(pMdl);
560 ExFreePool(pMdl);
561 }
562 }
563
564 /*
565 * Fall back on contiguous memory...
566 */
567 return rtR0MemObjNativeAllocCont(ppMem, cb, _4G - 1, fExecutable, pszTag);
568}
569
570
571/**
572 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
573 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
574 * to what rtR0MemObjNativeAllocCont() does.
575 *
576 * @returns IPRT status code.
577 * @param ppMem Where to store the pointer to the ring-0 memory object.
578 * @param cb The size.
579 * @param fExecutable Whether the mapping should be executable or not.
580 * @param PhysHighest The highest physical address for the pages in the allocation.
581 * @param uAlignment The alignment of the physical memory to allocate.
582 * Supported values are PAGE_SIZE, _2M, _4M and _1G.
583 * @param pszTag Allocation tag used for statistics and such.
584 */
585static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
586 size_t uAlignment, const char *pszTag)
587{
588 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
589 RT_NOREF1(fExecutable);
590
591 /*
592 * Allocate the memory and create an MDL for it.
593 */
594 PHYSICAL_ADDRESS PhysAddrHighest;
595 PhysAddrHighest.QuadPart = PhysHighest;
596 void *pv;
597 if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
598 {
599 PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
600 PhysAddrLowest.QuadPart = 0;
601 PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
602 pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
603 }
604 else if (uAlignment == PAGE_SIZE)
605 pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
606 else
607 return VERR_NOT_SUPPORTED;
608 if (!pv)
609 return VERR_NO_MEMORY;
610
611 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
612 if (pMdl)
613 {
614 MmBuildMdlForNonPagedPool(pMdl);
615#ifdef RT_ARCH_AMD64
616 if (fExecutable)
617 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
618#endif
619
620 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb, pszTag);
621 if (pMemNt)
622 {
623 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
624 pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
625 pMemNt->cMdls = 1;
626 pMemNt->apMdls[0] = pMdl;
627 *ppMem = &pMemNt->Core;
628 return VINF_SUCCESS;
629 }
630
631 IoFreeMdl(pMdl);
632 }
633 MmFreeContiguousMemory(pv);
634 return VERR_NO_MEMORY;
635}
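/* Note (added for clarity): when MmAllocateContiguousMemorySpecifyCache is available, the requested
 * uAlignment is passed as the BoundaryAddressMultiple argument above; e.g. uAlignment = _4M becomes
 * a 4MB boundary which the contiguous block is guaranteed not to cross. */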
636
637
638DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest,
639 bool fExecutable, const char *pszTag)
640{
641 return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, PhysHighest, PAGE_SIZE /* alignment */, pszTag);
642}
643
644
645DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
646 const char *pszTag)
647{
648 /*
649 * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
650 *
651 * This is preferable to using MmAllocateContiguousMemory because there are
652 * a few situations where the memory shouldn't be mapped, like for instance
653 * VT-x control memory. Since these are rather small allocations (one or
654 * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
655 * request.
656 *
657 * If the allocation is big, the chances are *probably* not very good. The
658 * current limit is kind of random...
659 */
660 if ( cb < _128K
661 && uAlignment == PAGE_SIZE
662 && g_pfnrtMmAllocatePagesForMdl
663 && g_pfnrtMmFreePagesFromMdl)
664 {
665 PHYSICAL_ADDRESS Zero;
666 Zero.QuadPart = 0;
667 PHYSICAL_ADDRESS HighAddr;
668 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
669 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
670 if (pMdl)
671 {
672 if (MmGetMdlByteCount(pMdl) >= cb)
673 {
674 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
675 PFN_NUMBER Pfn = paPfns[0] + 1;
676 const size_t cPages = cb >> PAGE_SHIFT;
677 size_t iPage;
678 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
679 if (paPfns[iPage] != Pfn)
680 break;
681 if (iPage >= cPages)
682 {
683 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
684 if (pMemNt)
685 {
686 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
687 pMemNt->Core.u.Phys.fAllocated = true;
688 pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
689 pMemNt->fAllocatedPagesForMdl = true;
690 pMemNt->cMdls = 1;
691 pMemNt->apMdls[0] = pMdl;
692 *ppMem = &pMemNt->Core;
693 return VINF_SUCCESS;
694 }
695 }
696 }
697 g_pfnrtMmFreePagesFromMdl(pMdl);
698 ExFreePool(pMdl);
699 }
700 }
701
702 return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment, pszTag);
703}
704
705
706DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
707{
708 if (g_pfnrtMmAllocatePagesForMdl && g_pfnrtMmFreePagesFromMdl)
709 {
710 /** @todo use the Ex version with the fail-if-not-all-requested-pages flag
711 * when possible. */
712 PHYSICAL_ADDRESS Zero;
713 Zero.QuadPart = 0;
714 PHYSICAL_ADDRESS HighAddr;
715 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
716 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
717 if (pMdl)
718 {
719 if (MmGetMdlByteCount(pMdl) >= cb)
720 {
721 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
722 if (pMemNt)
723 {
724 pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
725 pMemNt->fAllocatedPagesForMdl = true;
726 pMemNt->cMdls = 1;
727 pMemNt->apMdls[0] = pMdl;
728 *ppMem = &pMemNt->Core;
729 return VINF_SUCCESS;
730 }
731 }
732 g_pfnrtMmFreePagesFromMdl(pMdl);
733 ExFreePool(pMdl);
734 }
735 return VERR_NO_MEMORY;
736 }
737 return VERR_NOT_SUPPORTED;
738}
739
740
741DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
742 const char *pszTag)
743{
744 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);
745
746 /*
747 * Validate the address range and create a descriptor for it.
748 */
749 PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
750 if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
751 return VERR_ADDRESS_TOO_BIG;
752
753 /*
754 * Create the IPRT memory object.
755 */
756 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
757 if (pMemNt)
758 {
759 pMemNt->Core.u.Phys.PhysBase = Phys;
760 pMemNt->Core.u.Phys.fAllocated = false;
761 pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
762 *ppMem = &pMemNt->Core;
763 return VINF_SUCCESS;
764 }
765 return VERR_NO_MEMORY;
766}
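/* Worked example (added note): the round-trip check above guards against PFN_NUMBER overflow.
 * PFN_NUMBER is ULONG_PTR, so on 32-bit Windows a Phys of 1TB (PFN 0x10000000) still fits, while
 * anything at or above 16TB (PFN >= 2^32) would truncate and is rejected with VERR_ADDRESS_TOO_BIG. */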
767
768
769/**
770 * Internal worker for locking down pages.
771 *
772 * @return IPRT status code.
773 *
774 * @param ppMem Where to store the memory object pointer.
775 * @param pv First page.
776 * @param cb Number of bytes.
777 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
778 * and RTMEM_PROT_WRITE.
779 * @param R0Process The process \a pv and \a cb refers to.
780 * @param pszTag Allocation tag used for statistics and such.
781 */
782static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process,
783 const char *pszTag)
784{
785 /*
786 * Calc the number of MDLs we need and allocate the memory object structure.
787 */
788 size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
789 if (cb % MAX_LOCK_MEM_SIZE)
790 cMdls++;
791 if (cMdls >= UINT32_MAX)
792 return VERR_OUT_OF_RANGE;
793 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
794 RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
795 if (!pMemNt)
796 return VERR_NO_MEMORY;
797
798 /*
799 * Loop locking down the sub parts of the memory.
800 */
801 int rc = VINF_SUCCESS;
802 size_t cbTotal = 0;
803 uint8_t *pb = (uint8_t *)pv;
804 uint32_t iMdl;
805 for (iMdl = 0; iMdl < cMdls; iMdl++)
806 {
807 /*
808 * Calc the Mdl size and allocate it.
809 */
810 size_t cbCur = cb - cbTotal;
811 if (cbCur > MAX_LOCK_MEM_SIZE)
812 cbCur = MAX_LOCK_MEM_SIZE;
813 AssertMsg(cbCur, ("cbCur: 0!\n"));
814 PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
815 if (!pMdl)
816 {
817 rc = VERR_NO_MEMORY;
818 break;
819 }
820
821 /*
822 * Lock the pages.
823 */
824 __try
825 {
826 MmProbeAndLockPages(pMdl,
827 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
828 fAccess == RTMEM_PROT_READ
829 ? IoReadAccess
830 : fAccess == RTMEM_PROT_WRITE
831 ? IoWriteAccess
832 : IoModifyAccess);
833
834 pMemNt->apMdls[iMdl] = pMdl;
835 pMemNt->cMdls++;
836 }
837 __except(EXCEPTION_EXECUTE_HANDLER)
838 {
839 IoFreeMdl(pMdl);
840 rc = VERR_LOCK_FAILED;
841 break;
842 }
843
844 if ( R0Process != NIL_RTR0PROCESS
845 && g_pfnrtMmSecureVirtualMemory
846 && g_pfnrtMmUnsecureVirtualMemory)
847 {
848 /* Make sure the user process can't change the allocation. */
849 pMemNt->pvSecureMem = g_pfnrtMmSecureVirtualMemory(pv, cb,
850 fAccess & RTMEM_PROT_WRITE
851 ? PAGE_READWRITE
852 : PAGE_READONLY);
853 if (!pMemNt->pvSecureMem)
854 {
855 rc = VERR_NO_MEMORY;
856 break;
857 }
858 }
859
860 /* next */
861 cbTotal += cbCur;
862 pb += cbCur;
863 }
864 if (RT_SUCCESS(rc))
865 {
866 Assert(pMemNt->cMdls == cMdls);
867 pMemNt->Core.u.Lock.R0Process = R0Process;
868 *ppMem = &pMemNt->Core;
869 return rc;
870 }
871
872 /*
873 * We failed, perform cleanups.
874 */
875 while (iMdl-- > 0)
876 {
877 MmUnlockPages(pMemNt->apMdls[iMdl]);
878 IoFreeMdl(pMemNt->apMdls[iMdl]);
879 pMemNt->apMdls[iMdl] = NULL;
880 }
881 if (pMemNt->pvSecureMem)
882 {
883 if (g_pfnrtMmUnsecureVirtualMemory)
884 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
885 pMemNt->pvSecureMem = NULL;
886 }
887
888 rtR0MemObjDelete(&pMemNt->Core);
889 return rc;
890}
891
892
893DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
894 RTR0PROCESS R0Process, const char *pszTag)
895{
896 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
897 /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
898 return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process, pszTag);
899}
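/* Usage sketch (illustrative; assumes the public wrapper in iprt/memobj.h mirrors the native
 * signature above). R3PtrBuf and cbBuf are placeholders for the caller's user-mode buffer:
 *
 *     RTR0MEMOBJ hLock;
 *     int rc = RTR0MemObjLockUser(&hLock, R3PtrBuf, cbBuf, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
 *                                 RTR0ProcHandleSelf());
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTHCPHYS PhysPage0 = RTR0MemObjGetPagePhysAddr(hLock, 0);
 *         ...
 *         RTR0MemObjFree(hLock, false);
 *     }
 */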
900
901
902DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
903{
904 return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS, pszTag);
905}
906
907
908DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
909 const char *pszTag)
910{
911 /*
912 * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
913 * Or MmAllocateMappingAddress?
914 */
915 RT_NOREF(ppMem, pvFixed, cb, uAlignment, pszTag);
916 return VERR_NOT_SUPPORTED;
917}
918
919
920DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
921 RTR0PROCESS R0Process, const char *pszTag)
922{
923 /*
924 * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
925 */
926 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
927 return VERR_NOT_SUPPORTED;
928}
929
930
931/**
932 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
933 *
934 * @returns IPRT status code.
935 * @param ppMem Where to store the memory object for the mapping.
936 * @param pMemToMap The memory object to map.
937 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
938 * @param uAlignment The alignment requirement for the mapping.
939 * @param fProt The desired page protection for the mapping.
940 * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
941 * If not nil, it's the current process.
942 * @param offSub Offset into @a pMemToMap to start mapping.
943 * @param cbSub The number of bytes to map from @a pMemToMap. 0 if
944 * we're to map everything. Non-zero if @a offSub is
945 * non-zero.
946 * @param pszTag Allocation tag used for statistics and such.
947 */
948static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
949 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
950{
951 int rc = VERR_MAP_FAILED;
952
953 /*
954 * Check that the specified alignment is supported.
955 */
956 if (uAlignment > PAGE_SIZE)
957 return VERR_NOT_SUPPORTED;
958
959 /*
960 * There are two basic cases here, either we've got an MDL and can
961 * map it using MmMapLockedPages, or we've got a contiguous physical
962 * range (MMIO most likely) and can use MmMapIoSpace.
963 */
964 PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
965 if (pMemNtToMap->cMdls)
966 {
967 /* don't attempt to map locked regions with more than one MDL. */
968 if (pMemNtToMap->cMdls != 1)
969 return VERR_NOT_SUPPORTED;
970
971 /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
972 if (pvFixed != (void *)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
973 return VERR_NOT_SUPPORTED;
974
975 /* we can't map anything to the first page, sorry. */
976 if (pvFixed == 0)
977 return VERR_NOT_SUPPORTED;
978
979 /* only one system mapping for now - no time to figure out MDL restrictions right now. */
980 if ( pMemNtToMap->Core.uRel.Parent.cMappings
981 && R0Process == NIL_RTR0PROCESS)
982 {
983 if (pMemNtToMap->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
984 return VERR_NOT_SUPPORTED;
985 uint32_t iMapping = pMemNtToMap->Core.uRel.Parent.cMappings;
986 while (iMapping-- > 0)
987 {
988 PRTR0MEMOBJNT pMapping = (PRTR0MEMOBJNT)pMemNtToMap->Core.uRel.Parent.papMappings[iMapping];
989 if ( pMapping->Core.enmType != RTR0MEMOBJTYPE_MAPPING
990 || pMapping->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
991 return VERR_NOT_SUPPORTED;
992 }
993 }
994
995 /* Create a partial MDL if this is a sub-range request. */
996 PMDL pMdl;
997 if (!offSub && !cbSub)
998 pMdl = pMemNtToMap->apMdls[0];
999 else
1000 {
1001 pMdl = IoAllocateMdl(NULL, (ULONG)cbSub, FALSE, FALSE, NULL);
1002 if (pMdl)
1003 IoBuildPartialMdl(pMemNtToMap->apMdls[0], pMdl,
1004 (uint8_t *)MmGetMdlVirtualAddress(pMemNtToMap->apMdls[0]) + offSub, (ULONG)cbSub);
1005 else
1006 {
1007 /* IoAllocateMdl failed, so pMdl is NULL and there is nothing to free here. */
1008 return VERR_NO_MEMORY;
1009 }
1010 }
1011
1012 __try
1013 {
1014 /** @todo uAlignment */
1015 /** @todo How to set the protection on the pages? */
1016 void *pv;
1017 if (g_pfnrtMmMapLockedPagesSpecifyCache)
1018 pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl,
1019 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
1020 MmCached,
1021 pvFixed != (void *)-1 ? pvFixed : NULL,
1022 FALSE /* no bug check on failure */,
1023 NormalPagePriority);
1024 else
1025 pv = MmMapLockedPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
1026 if (pv)
1027 {
1028 NOREF(fProt);
1029
1030 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew( !offSub && !cbSub
1031 ? sizeof(*pMemNt) : RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[1]),
1032 RTR0MEMOBJTYPE_MAPPING, pv, pMemNtToMap->Core.cb, pszTag);
1033 if (pMemNt)
1034 {
1035 pMemNt->Core.u.Mapping.R0Process = R0Process;
1036 if (!offSub && !cbSub)
1037 pMemNt->fSubMapping = false;
1038 else
1039 {
1040 pMemNt->apMdls[0] = pMdl;
1041 pMemNt->cMdls = 1;
1042 pMemNt->fSubMapping = true;
1043 }
1044
1045 *ppMem = &pMemNt->Core;
1046 return VINF_SUCCESS;
1047 }
1048
1049 rc = VERR_NO_MEMORY;
1050 MmUnmapLockedPages(pv, pMdl);
1051 }
1052 }
1053 __except(EXCEPTION_EXECUTE_HANDLER)
1054 {
1055#ifdef LOG_ENABLED
1056 NTSTATUS rcNt = GetExceptionCode();
1057 Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
1058#endif
1059
1060 /* nothing */
1061 rc = VERR_MAP_FAILED;
1062 }
1063
1064 }
1065 else
1066 {
1067 AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
1068 && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
1069
1070 /* cannot map phys mem to user space (yet). */
1071 if (R0Process != NIL_RTR0PROCESS)
1072 return VERR_NOT_SUPPORTED;
1073
1074 /* Cannot sub-map these (yet). */
1075 AssertMsgReturn(!offSub && !cbSub, ("%#zx %#zx\n", offSub, cbSub), VERR_NOT_SUPPORTED);
1076
1077
1078 /** @todo uAlignment */
1079 /** @todo How to set the protection on the pages? */
1080 PHYSICAL_ADDRESS Phys;
1081 Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
1082 void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
1083 pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
1084 if (pv)
1085 {
1086 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
1087 pMemNtToMap->Core.cb, pszTag);
1088 if (pMemNt)
1089 {
1090 pMemNt->Core.u.Mapping.R0Process = R0Process;
1091 *ppMem = &pMemNt->Core;
1092 return VINF_SUCCESS;
1093 }
1094
1095 rc = VERR_NO_MEMORY;
1096 MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
1097 }
1098 }
1099
1100 NOREF(uAlignment); NOREF(fProt);
1101 return rc;
1102}
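/* Usage sketch (illustrative): mapping an MMIO range entered via rtR0MemObjNativeEnterPhys() ends
 * up in the MmMapIoSpace branch of rtR0MemObjNtMap() above. Through the public wrappers (signatures
 * assumed from iprt/memobj.h; PhysMmioBase and cbMmio are placeholders):
 *
 *     RTR0MEMOBJ hPhys, hMap;
 *     int rc = RTR0MemObjEnterPhys(&hPhys, PhysMmioBase, cbMmio, RTMEM_CACHE_POLICY_MMIO);
 *     if (RT_SUCCESS(rc))
 *         rc = RTR0MemObjMapKernel(&hMap, hPhys, (void *)-1 /*anywhere*/, 0 /*uAlignment*/,
 *                                  RTMEM_PROT_READ | RTMEM_PROT_WRITE);
 */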
1103
1104
1105DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
1106 unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
1107{
1108 return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS, offSub, cbSub, pszTag);
1109}
1110
1111
1112DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1113 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
1114{
1115 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
1116 return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub, pszTag);
1117}
1118
1119
1120DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1121{
1122#if 0
1123 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
1124#endif
1125
1126 /*
1127 * Seems there are some issues with this MmProtectMdlSystemAddress API, so
1128 * this code isn't currently enabled until we've tested it with the verifier.
1129 */
1130#if 0
1131 /*
1132 * The API we've got requires a kernel mapping.
1133 */
1134 if ( pMemNt->cMdls
1135 && g_pfnrtMmProtectMdlSystemAddress
1136 && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
1137 && pMemNt->Core.pv != NULL
1138 && ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
1139 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
1140 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
1141 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
1142 && pMemNt->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1143 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
1144 && pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS) ) )
1145 {
1146 /* Convert the protection. */
1147 LOCK_OPERATION enmLockOp;
1148 ULONG fAccess;
1149 switch (fProt)
1150 {
1151 case RTMEM_PROT_NONE:
1152 fAccess = PAGE_NOACCESS;
1153 enmLockOp = IoReadAccess;
1154 break;
1155 case RTMEM_PROT_READ:
1156 fAccess = PAGE_READONLY;
1157 enmLockOp = IoReadAccess;
1158 break;
1159 case RTMEM_PROT_WRITE:
1160 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
1161 fAccess = PAGE_READWRITE;
1162 enmLockOp = IoModifyAccess;
1163 break;
1164 case RTMEM_PROT_EXEC:
1165 fAccess = PAGE_EXECUTE;
1166 enmLockOp = IoReadAccess;
1167 break;
1168 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
1169 fAccess = PAGE_EXECUTE_READ;
1170 enmLockOp = IoReadAccess;
1171 break;
1172 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
1173 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
1174 fAccess = PAGE_EXECUTE_READWRITE;
1175 enmLockOp = IoModifyAccess;
1176 break;
1177 default:
1178 AssertFailedReturn(VERR_INVALID_FLAGS);
1179 }
1180
1181 NTSTATUS rcNt = STATUS_SUCCESS;
1182# if 0 /** @todo test this against the verifier. */
1183 if (offSub == 0 && pMemNt->Core.cb == cbSub)
1184 {
1185 uint32_t iMdl = pMemNt->cMdls;
1186 while (iMdl-- > 0)
1187 {
1188 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[iMdl], fAccess);
1189 if (!NT_SUCCESS(rcNt))
1190 break;
1191 }
1192 }
1193 else
1194# endif
1195 {
1196 /*
1197 * We ASSUME the following here:
1198 * - MmProtectMdlSystemAddress can deal with nonpaged pool memory
1199 * - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
1200 * - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
1201 * exact same ranges prior to freeing them.
1202 *
1203 * So, we lock the pages temporarily, call the API and unlock them.
1204 */
1205 uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
1206 while (cbSub > 0 && NT_SUCCESS(rcNt))
1207 {
1208 size_t cbCur = cbSub;
1209 if (cbCur > MAX_LOCK_MEM_SIZE)
1210 cbCur = MAX_LOCK_MEM_SIZE;
1211 PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
1212 if (pMdl)
1213 {
1214 __try
1215 {
1216 MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
1217 }
1218 __except(EXCEPTION_EXECUTE_HANDLER)
1219 {
1220 rcNt = GetExceptionCode();
1221 }
1222 if (NT_SUCCESS(rcNt))
1223 {
1224 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
1225 MmUnlockPages(pMdl);
1226 }
1227 IoFreeMdl(pMdl);
1228 }
1229 else
1230 rcNt = STATUS_NO_MEMORY;
1231 pbCur += cbCur;
1232 cbSub -= cbCur;
1233 }
1234 }
1235
1236 if (NT_SUCCESS(rcNt))
1237 return VINF_SUCCESS;
1238 return RTErrConvertFromNtStatus(rcNt);
1239 }
1240#else
1241 RT_NOREF4(pMem, offSub, cbSub, fProt);
1242#endif
1243
1244 return VERR_NOT_SUPPORTED;
1245}
1246
1247
1248DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1249{
1250 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
1251
1252 if (pMemNt->cMdls)
1253 {
1254 if (pMemNt->cMdls == 1)
1255 {
1256 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
1257 return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
1258 }
1259
1260 size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
1261 size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
1262 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
1263 return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
1264 }
1265
1266 switch (pMemNt->Core.enmType)
1267 {
1268 case RTR0MEMOBJTYPE_MAPPING:
1269 return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
1270
1271 case RTR0MEMOBJTYPE_PHYS:
1272 return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
1273
1274 case RTR0MEMOBJTYPE_PAGE:
1275 case RTR0MEMOBJTYPE_PHYS_NC:
1276 case RTR0MEMOBJTYPE_LOW:
1277 case RTR0MEMOBJTYPE_CONT:
1278 case RTR0MEMOBJTYPE_LOCK:
1279 default:
1280 AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
1281 case RTR0MEMOBJTYPE_RES_VIRT:
1282 return NIL_RTHCPHYS;
1283 }
1284}
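/* Worked example (added note): for a multi-MDL lock object on AMD64, MAX_LOCK_MEM_SIZE is 24MB,
 * i.e. 6144 pages per MDL, so iPage = 10000 resolves to MDL 1, PFN slot 10000 % 6144 = 3856. */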
1285