1 | /* $Id: memobj-r0drv-nt.cpp 92246 2021-11-06 03:10:49Z vboxsync $ */
|
---|
2 | /** @file
|
---|
3 | * IPRT - Ring-0 Memory Objects, NT.
|
---|
4 | */
|
---|
5 |
|
---|
6 | /*
|
---|
7 | * Copyright (C) 2006-2020 Oracle Corporation
|
---|
8 | *
|
---|
9 | * This file is part of VirtualBox Open Source Edition (OSE), as
|
---|
10 | * available from http://www.virtualbox.org. This file is free software;
|
---|
11 | * you can redistribute it and/or modify it under the terms of the GNU
|
---|
12 | * General Public License (GPL) as published by the Free Software
|
---|
13 | * Foundation, in version 2 as it comes in the "COPYING" file of the
|
---|
14 | * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
|
---|
15 | * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
|
---|
16 | *
|
---|
17 | * The contents of this file may alternatively be used under the terms
|
---|
18 | * of the Common Development and Distribution License Version 1.0
|
---|
19 | * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
|
---|
20 | * VirtualBox OSE distribution, in which case the provisions of the
|
---|
21 | * CDDL are applicable instead of those of the GPL.
|
---|
22 | *
|
---|
23 | * You may elect to license modified versions of this file under the
|
---|
24 | * terms and conditions of either the GPL or the CDDL or both.
|
---|
25 | */
|
---|
26 |
|
---|
27 |
|
---|
28 | /*********************************************************************************************************************************
|
---|
29 | * Header Files *
|
---|
30 | *********************************************************************************************************************************/
|
---|
31 | #include "the-nt-kernel.h"
|
---|
32 |
|
---|
33 | #include <iprt/memobj.h>
|
---|
34 | #include <iprt/alloc.h>
|
---|
35 | #include <iprt/assert.h>
|
---|
36 | #include <iprt/err.h>
|
---|
37 | #include <iprt/log.h>
|
---|
38 | #include <iprt/param.h>
|
---|
39 | #include <iprt/string.h>
|
---|
40 | #include <iprt/process.h>
|
---|
41 | #include "internal/memobj.h"
|
---|
42 | #include "internal-r0drv-nt.h"
|
---|
43 |
|
---|
44 |
|
---|
45 | /*********************************************************************************************************************************
|
---|
46 | * Defined Constants And Macros *
|
---|
47 | *********************************************************************************************************************************/
|
---|
48 | /** Maximum number of bytes we try to lock down in one go.
|
---|
49 | * This is supposed to have a limit right below 256MB, but this appears
|
---|
50 | * to actually be much lower. The values here have been determined experimentally.
|
---|
51 | */
|
---|
52 | #ifdef RT_ARCH_X86
|
---|
53 | # define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
|
---|
54 | #endif
|
---|
55 | #ifdef RT_ARCH_AMD64
|
---|
56 | # define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
|
---|
57 | #endif
|
---|
58 |
|
---|
59 | /* Newer WDK constants: */
|
---|
60 | #ifndef MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS
|
---|
61 | # define MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS 0x20
|
---|
62 | #endif
|
---|
63 | #ifndef MM_ALLOCATE_FAST_LARGE_PAGES
|
---|
64 | # define MM_ALLOCATE_FAST_LARGE_PAGES 0x40
|
---|
65 | #endif
|
---|
66 |
|
---|
67 |
|
---|
68 | /*********************************************************************************************************************************
|
---|
69 | * Structures and Typedefs *
|
---|
70 | *********************************************************************************************************************************/
|
---|
71 | /**
|
---|
72 | * The NT version of the memory object structure.
|
---|
73 | */
|
---|
74 | typedef struct RTR0MEMOBJNT
|
---|
75 | {
|
---|
76 | /** The core structure. */
|
---|
77 | RTR0MEMOBJINTERNAL Core;
|
---|
78 | /** Used MmAllocatePagesForMdl(). */
|
---|
79 | bool fAllocatedPagesForMdl;
|
---|
80 | /** Set if this is sub-section of the parent. */
|
---|
81 | bool fSubMapping;
|
---|
82 | /** Pointer returned by MmSecureVirtualMemory */
|
---|
83 | PVOID pvSecureMem;
|
---|
84 | /** The number of PMDLs (memory descriptor lists) in the array. */
|
---|
85 | uint32_t cMdls;
|
---|
86 | /** Array of MDL pointers. (variable size) */
|
---|
87 | PMDL apMdls[1];
|
---|
88 | } RTR0MEMOBJNT;
|
---|
89 | /** Pointer to the NT version of the memory object structure. */
|
---|
90 | typedef RTR0MEMOBJNT *PRTR0MEMOBJNT;
|
---|
91 |
|
---|
92 |
|
---|
93 |
|
---|
94 | DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
|
---|
95 | {
|
---|
96 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
|
---|
97 |
|
---|
98 | /*
|
---|
99 | * Deal with it on a per type basis (just as a variation).
|
---|
100 | */
|
---|
101 | switch (pMemNt->Core.enmType)
|
---|
102 | {
|
---|
103 | case RTR0MEMOBJTYPE_LOW:
|
---|
104 | if (pMemNt->fAllocatedPagesForMdl)
|
---|
105 | {
|
---|
106 | Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
|
---|
107 | MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
|
---|
108 | pMemNt->Core.pv = NULL;
|
---|
109 | if (pMemNt->pvSecureMem)
|
---|
110 | {
|
---|
111 | g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
|
---|
112 | pMemNt->pvSecureMem = NULL;
|
---|
113 | }
|
---|
114 |
|
---|
115 | g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
|
---|
116 | ExFreePool(pMemNt->apMdls[0]);
|
---|
117 | pMemNt->apMdls[0] = NULL;
|
---|
118 | pMemNt->cMdls = 0;
|
---|
119 | break;
|
---|
120 | }
|
---|
121 | AssertFailed();
|
---|
122 | break;
|
---|
123 |
|
---|
124 | case RTR0MEMOBJTYPE_PAGE:
|
---|
125 | Assert(pMemNt->Core.pv);
|
---|
126 | if (pMemNt->fAllocatedPagesForMdl)
|
---|
127 | {
|
---|
128 | Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
|
---|
129 | Assert(pMemNt->pvSecureMem == NULL);
|
---|
130 | MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
|
---|
131 | g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
|
---|
132 | ExFreePool(pMemNt->apMdls[0]);
|
---|
133 | }
|
---|
134 | else
|
---|
135 | {
|
---|
136 | if (g_pfnrtExFreePoolWithTag)
|
---|
137 | g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
|
---|
138 | else
|
---|
139 | ExFreePool(pMemNt->Core.pv);
|
---|
140 |
|
---|
141 | Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
|
---|
142 | IoFreeMdl(pMemNt->apMdls[0]);
|
---|
143 | }
|
---|
144 | pMemNt->Core.pv = NULL;
|
---|
145 | pMemNt->apMdls[0] = NULL;
|
---|
146 | pMemNt->cMdls = 0;
|
---|
147 | break;
|
---|
148 |
|
---|
149 | case RTR0MEMOBJTYPE_CONT:
|
---|
150 | Assert(pMemNt->Core.pv);
|
---|
151 | MmFreeContiguousMemory(pMemNt->Core.pv);
|
---|
152 | pMemNt->Core.pv = NULL;
|
---|
153 |
|
---|
154 | Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
|
---|
155 | IoFreeMdl(pMemNt->apMdls[0]);
|
---|
156 | pMemNt->apMdls[0] = NULL;
|
---|
157 | pMemNt->cMdls = 0;
|
---|
158 | break;
|
---|
159 |
|
---|
160 | case RTR0MEMOBJTYPE_PHYS:
|
---|
161 | /* rtR0MemObjNativeEnterPhys? */
|
---|
162 | if (!pMemNt->Core.u.Phys.fAllocated)
|
---|
163 | {
|
---|
164 | Assert(!pMemNt->fAllocatedPagesForMdl);
|
---|
165 | /* Nothing to do here. */
|
---|
166 | break;
|
---|
167 | }
|
---|
168 | RT_FALL_THRU();
|
---|
169 |
|
---|
170 | case RTR0MEMOBJTYPE_PHYS_NC:
|
---|
171 | if (pMemNt->fAllocatedPagesForMdl)
|
---|
172 | {
|
---|
173 | g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
|
---|
174 | ExFreePool(pMemNt->apMdls[0]);
|
---|
175 | pMemNt->apMdls[0] = NULL;
|
---|
176 | pMemNt->cMdls = 0;
|
---|
177 | break;
|
---|
178 | }
|
---|
179 | AssertFailed();
|
---|
180 | break;
|
---|
181 |
|
---|
182 | case RTR0MEMOBJTYPE_LOCK:
|
---|
183 | if (pMemNt->pvSecureMem)
|
---|
184 | {
|
---|
185 | g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
|
---|
186 | pMemNt->pvSecureMem = NULL;
|
---|
187 | }
|
---|
188 | for (uint32_t i = 0; i < pMemNt->cMdls; i++)
|
---|
189 | {
|
---|
190 | MmUnlockPages(pMemNt->apMdls[i]);
|
---|
191 | IoFreeMdl(pMemNt->apMdls[i]);
|
---|
192 | pMemNt->apMdls[i] = NULL;
|
---|
193 | }
|
---|
194 | break;
|
---|
195 |
|
---|
196 | case RTR0MEMOBJTYPE_RES_VIRT:
|
---|
197 | /* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
|
---|
198 | {
|
---|
199 | }
|
---|
200 | else
|
---|
201 | {
|
---|
202 | }*/
|
---|
203 | AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
|
---|
204 | return VERR_INTERNAL_ERROR;
|
---|
205 | break;
|
---|
206 |
|
---|
207 | case RTR0MEMOBJTYPE_MAPPING:
|
---|
208 | {
|
---|
209 | PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
|
---|
210 | Assert(pMemNtParent);
|
---|
211 | Assert(pMemNt->Core.pv);
|
---|
212 | Assert((pMemNt->cMdls == 0 && !pMemNt->fSubMapping) || (pMemNt->cMdls == 1 && pMemNt->fSubMapping));
|
---|
213 | if (pMemNtParent->cMdls)
|
---|
214 | {
|
---|
215 | Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
|
---|
216 | Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
|
---|
217 | || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
|
---|
218 | if (!pMemNt->cMdls)
|
---|
219 | MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
|
---|
220 | else
|
---|
221 | {
|
---|
222 | MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
|
---|
223 | IoFreeMdl(pMemNt->apMdls[0]);
|
---|
224 | pMemNt->apMdls[0] = NULL;
|
---|
225 | }
|
---|
226 | }
|
---|
227 | else
|
---|
228 | {
|
---|
229 | Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
|
---|
230 | && !pMemNtParent->Core.u.Phys.fAllocated);
|
---|
231 | Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
|
---|
232 | Assert(!pMemNt->fSubMapping);
|
---|
233 | MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
|
---|
234 | }
|
---|
235 | pMemNt->Core.pv = NULL;
|
---|
236 | break;
|
---|
237 | }
|
---|
238 |
|
---|
239 | default:
|
---|
240 | AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
|
---|
241 | return VERR_INTERNAL_ERROR;
|
---|
242 | }
|
---|
243 |
|
---|
244 | return VINF_SUCCESS;
|
---|
245 | }
|
---|
246 |
|
---|
247 |
|
---|
248 | DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
|
---|
249 | {
|
---|
250 | AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
|
---|
251 | RT_NOREF1(fExecutable);
|
---|
252 |
|
---|
253 | /*
|
---|
254 | * Use MmAllocatePagesForMdl if the allocation is a little bit big.
|
---|
255 | */
|
---|
256 | int rc = VERR_NO_PAGE_MEMORY;
|
---|
257 | if ( cb > _1M
|
---|
258 | && g_pfnrtMmAllocatePagesForMdl
|
---|
259 | && g_pfnrtMmFreePagesFromMdl
|
---|
260 | && g_pfnrtMmMapLockedPagesSpecifyCache)
|
---|
261 | {
|
---|
262 | PHYSICAL_ADDRESS Zero;
|
---|
263 | Zero.QuadPart = 0;
|
---|
264 | PHYSICAL_ADDRESS HighAddr;
|
---|
265 | HighAddr.QuadPart = MAXLONGLONG;
|
---|
266 | PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
|
---|
267 | if (pMdl)
|
---|
268 | {
|
---|
269 | if (MmGetMdlByteCount(pMdl) >= cb)
|
---|
270 | {
|
---|
271 | __try
|
---|
272 | {
|
---|
273 | void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
|
---|
274 | FALSE /* no bug check on failure */, NormalPagePriority);
|
---|
275 | if (pv)
|
---|
276 | {
|
---|
277 | #ifdef RT_ARCH_AMD64
|
---|
278 | if (fExecutable)
|
---|
279 | MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
|
---|
280 | #endif
|
---|
281 |
|
---|
282 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
|
---|
283 | if (pMemNt)
|
---|
284 | {
|
---|
285 | pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
|
---|
286 | pMemNt->fAllocatedPagesForMdl = true;
|
---|
287 | pMemNt->cMdls = 1;
|
---|
288 | pMemNt->apMdls[0] = pMdl;
|
---|
289 | *ppMem = &pMemNt->Core;
|
---|
290 | return VINF_SUCCESS;
|
---|
291 | }
|
---|
292 | MmUnmapLockedPages(pv, pMdl);
|
---|
293 | }
|
---|
294 | }
|
---|
295 | __except(EXCEPTION_EXECUTE_HANDLER)
|
---|
296 | {
|
---|
297 | # ifdef LOG_ENABLED
|
---|
298 | NTSTATUS rcNt = GetExceptionCode();
|
---|
299 | Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
|
---|
300 | # endif
|
---|
301 | /* nothing */
|
---|
302 | }
|
---|
303 | }
|
---|
304 | g_pfnrtMmFreePagesFromMdl(pMdl);
|
---|
305 | ExFreePool(pMdl);
|
---|
306 | }
|
---|
307 | }
|
---|
308 |
|
---|
309 | /*
|
---|
310 | * Try allocate the memory and create an MDL for them so
|
---|
311 | * we can query the physical addresses and do mappings later
|
---|
312 | * without running into out-of-memory conditions and similar problems.
|
---|
313 | */
|
---|
314 | void *pv;
|
---|
315 | if (g_pfnrtExAllocatePoolWithTag)
|
---|
316 | pv = g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
|
---|
317 | else
|
---|
318 | pv = ExAllocatePool(NonPagedPool, cb);
|
---|
319 | if (pv)
|
---|
320 | {
|
---|
321 | PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
|
---|
322 | if (pMdl)
|
---|
323 | {
|
---|
324 | MmBuildMdlForNonPagedPool(pMdl);
|
---|
325 | #ifdef RT_ARCH_AMD64
|
---|
326 | if (fExecutable)
|
---|
327 | MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
|
---|
328 | #endif
|
---|
329 |
|
---|
330 | /*
|
---|
331 | * Create the IPRT memory object.
|
---|
332 | */
|
---|
333 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
|
---|
334 | if (pMemNt)
|
---|
335 | {
|
---|
336 | pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
|
---|
337 | pMemNt->cMdls = 1;
|
---|
338 | pMemNt->apMdls[0] = pMdl;
|
---|
339 | *ppMem = &pMemNt->Core;
|
---|
340 | return VINF_SUCCESS;
|
---|
341 | }
|
---|
342 |
|
---|
343 | rc = VERR_NO_MEMORY;
|
---|
344 | IoFreeMdl(pMdl);
|
---|
345 | }
|
---|
346 | ExFreePool(pv);
|
---|
347 | }
|
---|
348 | return rc;
|
---|
349 | }
|
---|
350 |
|
---|
351 |
|
---|
352 | /**
|
---|
353 | * Helper for rtR0MemObjNativeAllocLarge that verifies the result.
|
---|
354 | */
|
---|
355 | static bool rtR0MemObjNtVerifyLargePageAlloc(PMDL pMdl, size_t cb, size_t cbLargePage)
|
---|
356 | {
|
---|
357 | if (MmGetMdlByteCount(pMdl) >= cb)
|
---|
358 | {
|
---|
359 | PPFN_NUMBER const paPfns = MmGetMdlPfnArray(pMdl);
|
---|
360 | size_t const cPagesPerLargePage = cbLargePage >> PAGE_SHIFT;
|
---|
361 | size_t const cLargePages = cb / cbLargePage;
|
---|
362 | size_t iPage = 0;
|
---|
363 | for (size_t iLargePage = 0; iLargePage < cLargePages; iLargePage++)
|
---|
364 | {
|
---|
365 | PFN_NUMBER Pfn = paPfns[iPage];
|
---|
366 | if (!(Pfn & (cbLargePage >> PAGE_SHIFT) - 1U))
|
---|
367 | {
|
---|
368 | for (size_t iSubPage = 1; iSubPage < cPagesPerLargePage; iSubPage++)
|
---|
369 | {
|
---|
370 | iPage++;
|
---|
371 | Pfn++;
|
---|
372 | if (paPfns[iPage] == Pfn)
|
---|
373 | { /* likely */ }
|
---|
374 | else
|
---|
375 | {
|
---|
376 | Log(("rtR0MemObjNativeAllocLarge: Subpage %#zu in large page #%zu is not contiguous: %#x, expected %#x\n",
|
---|
377 | iSubPage, iLargePage, paPfns[iPage], Pfn));
|
---|
378 | return false;
|
---|
379 | }
|
---|
380 | }
|
---|
381 | }
|
---|
382 | else
|
---|
383 | {
|
---|
384 | Log(("rtR0MemObjNativeAllocLarge: Large page #%zu is misaligned: %#x, cbLargePage=%#zx\n",
|
---|
385 | iLargePage, Pfn, cbLargePage));
|
---|
386 | return false;
|
---|
387 | }
|
---|
388 | }
|
---|
389 | return true;
|
---|
390 | }
|
---|
391 | Log(("rtR0MemObjNativeAllocLarge: Got back too few pages: %#zx, requested %#zx\n", MmGetMdlByteCount(pMdl), cb));
|
---|
392 | return false;
|
---|
393 | }
|
---|
394 |
|
---|
395 |
|
---|
396 | DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
|
---|
397 | const char *pszTag)
|
---|
398 | {
|
---|
399 | /*
|
---|
400 | * Need the MmAllocatePagesForMdlEx function so we can specify flags.
|
---|
401 | */
|
---|
402 | if ( g_uRtNtVersion >= RTNT_MAKE_VERSION(6,1) /* Windows 7+ */
|
---|
403 | && g_pfnrtMmAllocatePagesForMdlEx
|
---|
404 | && g_pfnrtMmFreePagesFromMdl
|
---|
405 | && g_pfnrtMmMapLockedPagesSpecifyCache)
|
---|
406 | {
|
---|
407 | ULONG fNtFlags = MM_ALLOCATE_FULLY_REQUIRED /* W7+: Make it fail if we don't get all we ask for.*/
|
---|
408 | | MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS; /* W7+: The SkipBytes chunks must be physcially contiguous. */
|
---|
409 | if ((fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST) && g_uRtNtVersion >= RTNT_MAKE_VERSION(6, 2))
|
---|
410 | fNtFlags |= MM_ALLOCATE_FAST_LARGE_PAGES; /* W8+: Don't try too hard, just fail if not enough handy. */
|
---|
411 |
|
---|
412 | PHYSICAL_ADDRESS Zero;
|
---|
413 | Zero.QuadPart = 0;
|
---|
414 |
|
---|
415 | PHYSICAL_ADDRESS HighAddr;
|
---|
416 | HighAddr.QuadPart = MAXLONGLONG;
|
---|
417 |
|
---|
418 | PHYSICAL_ADDRESS Skip;
|
---|
419 | Skip.QuadPart = cbLargePage;
|
---|
420 |
|
---|
421 | int rc;
|
---|
422 | PMDL const pMdl = g_pfnrtMmAllocatePagesForMdlEx(Zero, HighAddr, Skip, cb, MmCached, fNtFlags);
|
---|
423 | if (pMdl)
|
---|
424 | {
|
---|
425 | /* Verify the result. */
|
---|
426 | if (rtR0MemObjNtVerifyLargePageAlloc(pMdl, cb, cbLargePage))
|
---|
427 | {
|
---|
428 | /*
|
---|
429 | * Map the allocation into kernel space. Unless the memory is already mapped
|
---|
430 | * somewhere (seems to be actually), I guess it's unlikely that we'll get a
|
---|
431 | * large page aligned mapping back here...
|
---|
432 | */
|
---|
433 | __try
|
---|
434 | {
|
---|
435 | void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
|
---|
436 | FALSE /* no bug check on failure */, NormalPagePriority);
|
---|
437 | if (pv)
|
---|
438 | {
|
---|
439 | /*
|
---|
440 | * Create the memory object.
|
---|
441 | */
|
---|
442 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
|
---|
443 | if (pMemNt)
|
---|
444 | {
|
---|
445 | pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
|
---|
446 | pMemNt->fAllocatedPagesForMdl = true;
|
---|
447 | pMemNt->cMdls = 1;
|
---|
448 | pMemNt->apMdls[0] = pMdl;
|
---|
449 | *ppMem = &pMemNt->Core;
|
---|
450 | return VINF_SUCCESS;
|
---|
451 | }
|
---|
452 |
|
---|
453 | MmUnmapLockedPages(pv, pMdl);
|
---|
454 | }
|
---|
455 | }
|
---|
456 | __except(EXCEPTION_EXECUTE_HANDLER)
|
---|
457 | {
|
---|
458 | #ifdef LOG_ENABLED
|
---|
459 | NTSTATUS rcNt = GetExceptionCode();
|
---|
460 | Log(("rtR0MemObjNativeAllocLarge: Exception Code %#x\n", rcNt));
|
---|
461 | #endif
|
---|
462 | /* nothing */
|
---|
463 | }
|
---|
464 | }
|
---|
465 |
|
---|
466 | g_pfnrtMmFreePagesFromMdl(pMdl);
|
---|
467 | ExFreePool(pMdl);
|
---|
468 | rc = VERR_NO_MEMORY;
|
---|
469 | }
|
---|
470 | else
|
---|
471 | rc = fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST ? VERR_TRY_AGAIN : VERR_NO_MEMORY;
|
---|
472 | return rc;
|
---|
473 | }
|
---|
474 |
|
---|
475 | return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
|
---|
476 | }
|
---|
477 |
|
---|
478 |
|
---|
479 | DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
|
---|
480 | {
|
---|
481 | AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
|
---|
482 |
|
---|
483 | /*
|
---|
484 | * Try see if we get lucky first...
|
---|
485 | * (We could probably just assume we're lucky on NT4.)
|
---|
486 | */
|
---|
487 | int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable, pszTag);
|
---|
488 | if (RT_SUCCESS(rc))
|
---|
489 | {
|
---|
490 | size_t iPage = cb >> PAGE_SHIFT;
|
---|
491 | while (iPage-- > 0)
|
---|
492 | if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
|
---|
493 | {
|
---|
494 | rc = VERR_NO_LOW_MEMORY;
|
---|
495 | break;
|
---|
496 | }
|
---|
497 | if (RT_SUCCESS(rc))
|
---|
498 | return rc;
|
---|
499 |
|
---|
500 | /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
|
---|
501 | RTR0MemObjFree(*ppMem, false);
|
---|
502 | *ppMem = NULL;
|
---|
503 | }
|
---|
504 |
|
---|
505 | /*
|
---|
506 | * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
|
---|
507 | */
|
---|
508 | if ( g_pfnrtMmAllocatePagesForMdl
|
---|
509 | && g_pfnrtMmFreePagesFromMdl
|
---|
510 | && g_pfnrtMmMapLockedPagesSpecifyCache)
|
---|
511 | {
|
---|
512 | PHYSICAL_ADDRESS Zero;
|
---|
513 | Zero.QuadPart = 0;
|
---|
514 | PHYSICAL_ADDRESS HighAddr;
|
---|
515 | HighAddr.QuadPart = _4G - 1;
|
---|
516 | PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
|
---|
517 | if (pMdl)
|
---|
518 | {
|
---|
519 | if (MmGetMdlByteCount(pMdl) >= cb)
|
---|
520 | {
|
---|
521 | __try
|
---|
522 | {
|
---|
523 | void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
|
---|
524 | FALSE /* no bug check on failure */, NormalPagePriority);
|
---|
525 | if (pv)
|
---|
526 | {
|
---|
527 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb, pszTag);
|
---|
528 | if (pMemNt)
|
---|
529 | {
|
---|
530 | pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
|
---|
531 | pMemNt->fAllocatedPagesForMdl = true;
|
---|
532 | pMemNt->cMdls = 1;
|
---|
533 | pMemNt->apMdls[0] = pMdl;
|
---|
534 | *ppMem = &pMemNt->Core;
|
---|
535 | return VINF_SUCCESS;
|
---|
536 | }
|
---|
537 | MmUnmapLockedPages(pv, pMdl);
|
---|
538 | }
|
---|
539 | }
|
---|
540 | __except(EXCEPTION_EXECUTE_HANDLER)
|
---|
541 | {
|
---|
542 | # ifdef LOG_ENABLED
|
---|
543 | NTSTATUS rcNt = GetExceptionCode();
|
---|
544 | Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
|
---|
545 | # endif
|
---|
546 | /* nothing */
|
---|
547 | }
|
---|
548 | }
|
---|
549 | g_pfnrtMmFreePagesFromMdl(pMdl);
|
---|
550 | ExFreePool(pMdl);
|
---|
551 | }
|
---|
552 | }
|
---|
553 |
|
---|
554 | /*
|
---|
555 | * Fall back on contiguous memory...
|
---|
556 | */
|
---|
557 | return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable, pszTag);
|
---|
558 | }
|
---|
559 |
|
---|
560 |
|
---|
561 | /**
|
---|
562 | * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
|
---|
563 | * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
|
---|
564 | * to what rtR0MemObjNativeAllocCont() does.
|
---|
565 | *
|
---|
566 | * @returns IPRT status code.
|
---|
567 | * @param ppMem Where to store the pointer to the ring-0 memory object.
|
---|
568 | * @param cb The size.
|
---|
569 | * @param fExecutable Whether the mapping should be executable or not.
|
---|
570 | * @param PhysHighest The highest physical address for the pages in allocation.
|
---|
571 | * @param uAlignment The alignment of the physical memory to allocate.
|
---|
572 | * Supported values are PAGE_SIZE, _2M, _4M and _1G.
|
---|
573 | * @param pszTag Allocation tag used for statistics and such.
|
---|
574 | */
|
---|
575 | static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
|
---|
576 | size_t uAlignment, const char *pszTag)
|
---|
577 | {
|
---|
578 | AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
|
---|
579 | RT_NOREF1(fExecutable);
|
---|
580 |
|
---|
581 | /*
|
---|
582 | * Allocate the memory and create an MDL for it.
|
---|
583 | */
|
---|
584 | PHYSICAL_ADDRESS PhysAddrHighest;
|
---|
585 | PhysAddrHighest.QuadPart = PhysHighest;
|
---|
586 | void *pv;
|
---|
587 | if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
|
---|
588 | {
|
---|
589 | PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
|
---|
590 | PhysAddrLowest.QuadPart = 0;
|
---|
591 | PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
|
---|
592 | pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
|
---|
593 | }
|
---|
594 | else if (uAlignment == PAGE_SIZE)
|
---|
595 | pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
|
---|
596 | else
|
---|
597 | return VERR_NOT_SUPPORTED;
|
---|
598 | if (!pv)
|
---|
599 | return VERR_NO_MEMORY;
|
---|
600 |
|
---|
601 | PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
|
---|
602 | if (pMdl)
|
---|
603 | {
|
---|
604 | MmBuildMdlForNonPagedPool(pMdl);
|
---|
605 | #ifdef RT_ARCH_AMD64
|
---|
606 | if (fExecutable)
|
---|
607 | MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
|
---|
608 | #endif
|
---|
609 |
|
---|
610 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb, pszTag);
|
---|
611 | if (pMemNt)
|
---|
612 | {
|
---|
613 | pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
|
---|
614 | pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
|
---|
615 | pMemNt->cMdls = 1;
|
---|
616 | pMemNt->apMdls[0] = pMdl;
|
---|
617 | *ppMem = &pMemNt->Core;
|
---|
618 | return VINF_SUCCESS;
|
---|
619 | }
|
---|
620 |
|
---|
621 | IoFreeMdl(pMdl);
|
---|
622 | }
|
---|
623 | MmFreeContiguousMemory(pv);
|
---|
624 | return VERR_NO_MEMORY;
|
---|
625 | }
|
---|
626 |
|
---|
627 |
|
---|
628 | DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
|
---|
629 | {
|
---|
630 | return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */, pszTag);
|
---|
631 | }
|
---|
632 |
|
---|
633 |
|
---|
634 | DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
|
---|
635 | const char *pszTag)
|
---|
636 | {
|
---|
637 | /*
|
---|
638 | * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
|
---|
639 | *
|
---|
640 | * This is preferable to using MmAllocateContiguousMemory because there are
|
---|
641 | * a few situations where the memory shouldn't be mapped, like for instance
|
---|
642 | * VT-x control memory. Since these are rather small allocations (one or
|
---|
643 | * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
|
---|
644 | * request.
|
---|
645 | *
|
---|
646 | * If the allocation is big, the chances are *probably* not very good. The
|
---|
647 | * current limit is kind of random...
|
---|
648 | */
|
---|
649 | if ( cb < _128K
|
---|
650 | && uAlignment == PAGE_SIZE
|
---|
651 | && g_pfnrtMmAllocatePagesForMdl
|
---|
652 | && g_pfnrtMmFreePagesFromMdl)
|
---|
653 | {
|
---|
654 | PHYSICAL_ADDRESS Zero;
|
---|
655 | Zero.QuadPart = 0;
|
---|
656 | PHYSICAL_ADDRESS HighAddr;
|
---|
657 | HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
|
---|
658 | PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
|
---|
659 | if (pMdl)
|
---|
660 | {
|
---|
661 | if (MmGetMdlByteCount(pMdl) >= cb)
|
---|
662 | {
|
---|
663 | PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
|
---|
664 | PFN_NUMBER Pfn = paPfns[0] + 1;
|
---|
665 | const size_t cPages = cb >> PAGE_SHIFT;
|
---|
666 | size_t iPage;
|
---|
667 | for (iPage = 1; iPage < cPages; iPage++, Pfn++)
|
---|
668 | if (paPfns[iPage] != Pfn)
|
---|
669 | break;
|
---|
670 | if (iPage >= cPages)
|
---|
671 | {
|
---|
672 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
|
---|
673 | if (pMemNt)
|
---|
674 | {
|
---|
675 | pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
|
---|
676 | pMemNt->Core.u.Phys.fAllocated = true;
|
---|
677 | pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
|
---|
678 | pMemNt->fAllocatedPagesForMdl = true;
|
---|
679 | pMemNt->cMdls = 1;
|
---|
680 | pMemNt->apMdls[0] = pMdl;
|
---|
681 | *ppMem = &pMemNt->Core;
|
---|
682 | return VINF_SUCCESS;
|
---|
683 | }
|
---|
684 | }
|
---|
685 | }
|
---|
686 | g_pfnrtMmFreePagesFromMdl(pMdl);
|
---|
687 | ExFreePool(pMdl);
|
---|
688 | }
|
---|
689 | }
|
---|
690 |
|
---|
691 | return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment, pszTag);
|
---|
692 | }
|
---|
693 |
|
---|
694 |
|
---|
695 | DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
|
---|
696 | {
|
---|
697 | if (g_pfnrtMmAllocatePagesForMdl && g_pfnrtMmFreePagesFromMdl)
|
---|
698 | {
|
---|
699 | /** @todo use the Ex version with the fail-if-not-all-requested-pages flag
|
---|
700 | * when possible. */
|
---|
701 | PHYSICAL_ADDRESS Zero;
|
---|
702 | Zero.QuadPart = 0;
|
---|
703 | PHYSICAL_ADDRESS HighAddr;
|
---|
704 | HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
|
---|
705 | PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
|
---|
706 | if (pMdl)
|
---|
707 | {
|
---|
708 | if (MmGetMdlByteCount(pMdl) >= cb)
|
---|
709 | {
|
---|
710 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
|
---|
711 | if (pMemNt)
|
---|
712 | {
|
---|
713 | pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
|
---|
714 | pMemNt->fAllocatedPagesForMdl = true;
|
---|
715 | pMemNt->cMdls = 1;
|
---|
716 | pMemNt->apMdls[0] = pMdl;
|
---|
717 | *ppMem = &pMemNt->Core;
|
---|
718 | return VINF_SUCCESS;
|
---|
719 | }
|
---|
720 | }
|
---|
721 | g_pfnrtMmFreePagesFromMdl(pMdl);
|
---|
722 | ExFreePool(pMdl);
|
---|
723 | }
|
---|
724 | return VERR_NO_MEMORY;
|
---|
725 | }
|
---|
726 | return VERR_NOT_SUPPORTED;
|
---|
727 | }
|
---|
728 |
|
---|
729 |
|
---|
730 | DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
|
---|
731 | const char *pszTag)
|
---|
732 | {
|
---|
733 | AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);
|
---|
734 |
|
---|
735 | /*
|
---|
736 | * Validate the address range and create a descriptor for it.
|
---|
737 | */
|
---|
738 | PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
|
---|
739 | if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
|
---|
740 | return VERR_ADDRESS_TOO_BIG;
|
---|
741 |
|
---|
742 | /*
|
---|
743 | * Create the IPRT memory object.
|
---|
744 | */
|
---|
745 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
|
---|
746 | if (pMemNt)
|
---|
747 | {
|
---|
748 | pMemNt->Core.u.Phys.PhysBase = Phys;
|
---|
749 | pMemNt->Core.u.Phys.fAllocated = false;
|
---|
750 | pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
|
---|
751 | *ppMem = &pMemNt->Core;
|
---|
752 | return VINF_SUCCESS;
|
---|
753 | }
|
---|
754 | return VERR_NO_MEMORY;
|
---|
755 | }
|
---|
756 |
|
---|
757 |
|
---|
758 | /**
|
---|
759 | * Internal worker for locking down pages.
|
---|
760 | *
|
---|
761 | * @return IPRT status code.
|
---|
762 | *
|
---|
763 | * @param ppMem Where to store the memory object pointer.
|
---|
764 | * @param pv First page.
|
---|
765 | * @param cb Number of bytes.
|
---|
766 | * @param fAccess The desired access, a combination of RTMEM_PROT_READ
|
---|
767 | * and RTMEM_PROT_WRITE.
|
---|
768 | * @param R0Process The process \a pv and \a cb refers to.
|
---|
769 | * @param pszTag Allocation tag used for statistics and such.
|
---|
770 | */
|
---|
771 | static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process,
|
---|
772 | const char *pszTag)
|
---|
773 | {
|
---|
774 | /*
|
---|
775 | * Calc the number of MDLs we need and allocate the memory object structure.
|
---|
776 | */
|
---|
777 | size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
|
---|
778 | if (cb % MAX_LOCK_MEM_SIZE)
|
---|
779 | cMdls++;
|
---|
780 | if (cMdls >= UINT32_MAX)
|
---|
781 | return VERR_OUT_OF_RANGE;
|
---|
782 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
|
---|
783 | RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
|
---|
784 | if (!pMemNt)
|
---|
785 | return VERR_NO_MEMORY;
|
---|
786 |
|
---|
787 | /*
|
---|
788 | * Loop locking down the sub parts of the memory.
|
---|
789 | */
|
---|
790 | int rc = VINF_SUCCESS;
|
---|
791 | size_t cbTotal = 0;
|
---|
792 | uint8_t *pb = (uint8_t *)pv;
|
---|
793 | uint32_t iMdl;
|
---|
794 | for (iMdl = 0; iMdl < cMdls; iMdl++)
|
---|
795 | {
|
---|
796 | /*
|
---|
797 | * Calc the Mdl size and allocate it.
|
---|
798 | */
|
---|
799 | size_t cbCur = cb - cbTotal;
|
---|
800 | if (cbCur > MAX_LOCK_MEM_SIZE)
|
---|
801 | cbCur = MAX_LOCK_MEM_SIZE;
|
---|
802 | AssertMsg(cbCur, ("cbCur: 0!\n"));
|
---|
803 | PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
|
---|
804 | if (!pMdl)
|
---|
805 | {
|
---|
806 | rc = VERR_NO_MEMORY;
|
---|
807 | break;
|
---|
808 | }
|
---|
809 |
|
---|
810 | /*
|
---|
811 | * Lock the pages.
|
---|
812 | */
|
---|
813 | __try
|
---|
814 | {
|
---|
815 | MmProbeAndLockPages(pMdl,
|
---|
816 | R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
|
---|
817 | fAccess == RTMEM_PROT_READ
|
---|
818 | ? IoReadAccess
|
---|
819 | : fAccess == RTMEM_PROT_WRITE
|
---|
820 | ? IoWriteAccess
|
---|
821 | : IoModifyAccess);
|
---|
822 |
|
---|
823 | pMemNt->apMdls[iMdl] = pMdl;
|
---|
824 | pMemNt->cMdls++;
|
---|
825 | }
|
---|
826 | __except(EXCEPTION_EXECUTE_HANDLER)
|
---|
827 | {
|
---|
828 | IoFreeMdl(pMdl);
|
---|
829 | rc = VERR_LOCK_FAILED;
|
---|
830 | break;
|
---|
831 | }
|
---|
832 |
|
---|
833 | if ( R0Process != NIL_RTR0PROCESS
|
---|
834 | && g_pfnrtMmSecureVirtualMemory
|
---|
835 | && g_pfnrtMmUnsecureVirtualMemory)
|
---|
836 | {
|
---|
837 | /* Make sure the user process can't change the allocation. */
|
---|
838 | pMemNt->pvSecureMem = g_pfnrtMmSecureVirtualMemory(pv, cb,
|
---|
839 | fAccess & RTMEM_PROT_WRITE
|
---|
840 | ? PAGE_READWRITE
|
---|
841 | : PAGE_READONLY);
|
---|
842 | if (!pMemNt->pvSecureMem)
|
---|
843 | {
|
---|
844 | rc = VERR_NO_MEMORY;
|
---|
845 | break;
|
---|
846 | }
|
---|
847 | }
|
---|
848 |
|
---|
849 | /* next */
|
---|
850 | cbTotal += cbCur;
|
---|
851 | pb += cbCur;
|
---|
852 | }
|
---|
853 | if (RT_SUCCESS(rc))
|
---|
854 | {
|
---|
855 | Assert(pMemNt->cMdls == cMdls);
|
---|
856 | pMemNt->Core.u.Lock.R0Process = R0Process;
|
---|
857 | *ppMem = &pMemNt->Core;
|
---|
858 | return rc;
|
---|
859 | }
|
---|
860 |
|
---|
861 | /*
|
---|
862 | * We failed, perform cleanups.
|
---|
863 | */
|
---|
864 | while (iMdl-- > 0)
|
---|
865 | {
|
---|
866 | MmUnlockPages(pMemNt->apMdls[iMdl]);
|
---|
867 | IoFreeMdl(pMemNt->apMdls[iMdl]);
|
---|
868 | pMemNt->apMdls[iMdl] = NULL;
|
---|
869 | }
|
---|
870 | if (pMemNt->pvSecureMem)
|
---|
871 | {
|
---|
872 | if (g_pfnrtMmUnsecureVirtualMemory)
|
---|
873 | g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
|
---|
874 | pMemNt->pvSecureMem = NULL;
|
---|
875 | }
|
---|
876 |
|
---|
877 | rtR0MemObjDelete(&pMemNt->Core);
|
---|
878 | return rc;
|
---|
879 | }
|
---|
880 |
|
---|
881 |
|
---|
882 | DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
|
---|
883 | RTR0PROCESS R0Process, const char *pszTag)
|
---|
884 | {
|
---|
885 | AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
|
---|
886 | /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
|
---|
887 | return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process, pszTag);
|
---|
888 | }
|
---|
889 |
|
---|
890 |
|
---|
891 | DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
|
---|
892 | {
|
---|
893 | return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS, pszTag);
|
---|
894 | }
|
---|
895 |
|
---|
896 |
|
---|
897 | DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
|
---|
898 | const char *pszTag)
|
---|
899 | {
|
---|
900 | /*
|
---|
901 | * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
|
---|
902 | * Or MmAllocateMappingAddress?
|
---|
903 | */
|
---|
904 | RT_NOREF(ppMem, pvFixed, cb, uAlignment, pszTag);
|
---|
905 | return VERR_NOT_SUPPORTED;
|
---|
906 | }
|
---|
907 |
|
---|
908 |
|
---|
909 | DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
|
---|
910 | RTR0PROCESS R0Process, const char *pszTag)
|
---|
911 | {
|
---|
912 | /*
|
---|
913 | * ZeCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
|
---|
914 | */
|
---|
915 | RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
|
---|
916 | return VERR_NOT_SUPPORTED;
|
---|
917 | }
|
---|
918 |
|
---|
919 |
|
---|
920 | /**
|
---|
921 | * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
|
---|
922 | *
|
---|
923 | * @returns IPRT status code.
|
---|
924 | * @param ppMem Where to store the memory object for the mapping.
|
---|
925 | * @param pMemToMap The memory object to map.
|
---|
926 | * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
|
---|
927 | * @param uAlignment The alignment requirement for the mapping.
|
---|
928 | * @param fProt The desired page protection for the mapping.
|
---|
929 | * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
|
---|
930 | * If not nil, it's the current process.
|
---|
931 | * @param offSub Offset into @a pMemToMap to start mapping.
|
---|
932 | * @param cbSub The number of bytes to map from @a pMapToMem. 0 if
|
---|
933 | * we're to map everything. Non-zero if @a offSub is
|
---|
934 | * non-zero.
|
---|
935 | * @param pszTag Allocation tag used for statistics and such.
|
---|
936 | */
|
---|
937 | static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
|
---|
938 | unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
|
---|
939 | {
|
---|
940 | int rc = VERR_MAP_FAILED;
|
---|
941 |
|
---|
942 | /*
|
---|
943 | * Check that the specified alignment is supported.
|
---|
944 | */
|
---|
945 | if (uAlignment > PAGE_SIZE)
|
---|
946 | return VERR_NOT_SUPPORTED;
|
---|
947 |
|
---|
948 | /*
|
---|
949 | * There are two basic cases here, either we've got an MDL and can
|
---|
950 | * map it using MmMapLockedPages, or we've got a contiguous physical
|
---|
951 | * range (MMIO most likely) and can use MmMapIoSpace.
|
---|
952 | */
|
---|
953 | PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
|
---|
954 | if (pMemNtToMap->cMdls)
|
---|
955 | {
|
---|
956 | /* don't attempt map locked regions with more than one mdl. */
|
---|
957 | if (pMemNtToMap->cMdls != 1)
|
---|
958 | return VERR_NOT_SUPPORTED;
|
---|
959 |
|
---|
960 | /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
|
---|
961 | if (pvFixed != (void *)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
|
---|
962 | return VERR_NOT_SUPPORTED;
|
---|
963 |
|
---|
964 | /* we can't map anything to the first page, sorry. */
|
---|
965 | if (pvFixed == 0)
|
---|
966 | return VERR_NOT_SUPPORTED;
|
---|
967 |
|
---|
968 | /* only one system mapping for now - no time to figure out MDL restrictions right now. */
|
---|
969 | if ( pMemNtToMap->Core.uRel.Parent.cMappings
|
---|
970 | && R0Process == NIL_RTR0PROCESS)
|
---|
971 | {
|
---|
972 | if (pMemNtToMap->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
|
---|
973 | return VERR_NOT_SUPPORTED;
|
---|
974 | uint32_t iMapping = pMemNtToMap->Core.uRel.Parent.cMappings;
|
---|
975 | while (iMapping-- > 0)
|
---|
976 | {
|
---|
977 | PRTR0MEMOBJNT pMapping = (PRTR0MEMOBJNT)pMemNtToMap->Core.uRel.Parent.papMappings[iMapping];
|
---|
978 | if ( pMapping->Core.enmType != RTR0MEMOBJTYPE_MAPPING
|
---|
979 | || pMapping->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
|
---|
980 | return VERR_NOT_SUPPORTED;
|
---|
981 | }
|
---|
982 | }
|
---|
983 |
|
---|
984 | /* Create a partial MDL if this is a sub-range request. */
|
---|
985 | PMDL pMdl;
|
---|
986 | if (!offSub && !cbSub)
|
---|
987 | pMdl = pMemNtToMap->apMdls[0];
|
---|
988 | else
|
---|
989 | {
|
---|
990 | pMdl = IoAllocateMdl(NULL, (ULONG)cbSub, FALSE, FALSE, NULL);
|
---|
991 | if (pMdl)
|
---|
992 | IoBuildPartialMdl(pMemNtToMap->apMdls[0], pMdl,
|
---|
993 | (uint8_t *)MmGetMdlVirtualAddress(pMemNtToMap->apMdls[0]) + offSub, (ULONG)cbSub);
|
---|
994 | else
|
---|
995 | {
|
---|
996 | IoFreeMdl(pMdl);
|
---|
997 | return VERR_NO_MEMORY;
|
---|
998 | }
|
---|
999 | }
|
---|
1000 |
|
---|
1001 | __try
|
---|
1002 | {
|
---|
1003 | /** @todo uAlignment */
|
---|
1004 | /** @todo How to set the protection on the pages? */
|
---|
1005 | void *pv;
|
---|
1006 | if (g_pfnrtMmMapLockedPagesSpecifyCache)
|
---|
1007 | pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl,
|
---|
1008 | R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
|
---|
1009 | MmCached,
|
---|
1010 | pvFixed != (void *)-1 ? pvFixed : NULL,
|
---|
1011 | FALSE /* no bug check on failure */,
|
---|
1012 | NormalPagePriority);
|
---|
1013 | else
|
---|
1014 | pv = MmMapLockedPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
|
---|
1015 | if (pv)
|
---|
1016 | {
|
---|
1017 | NOREF(fProt);
|
---|
1018 |
|
---|
1019 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew( !offSub && !cbSub
|
---|
1020 | ? sizeof(*pMemNt) : RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[1]),
|
---|
1021 | RTR0MEMOBJTYPE_MAPPING, pv, pMemNtToMap->Core.cb, pszTag);
|
---|
1022 | if (pMemNt)
|
---|
1023 | {
|
---|
1024 | pMemNt->Core.u.Mapping.R0Process = R0Process;
|
---|
1025 | if (!offSub && !cbSub)
|
---|
1026 | pMemNt->fSubMapping = false;
|
---|
1027 | else
|
---|
1028 | {
|
---|
1029 | pMemNt->apMdls[0] = pMdl;
|
---|
1030 | pMemNt->cMdls = 1;
|
---|
1031 | pMemNt->fSubMapping = true;
|
---|
1032 | }
|
---|
1033 |
|
---|
1034 | *ppMem = &pMemNt->Core;
|
---|
1035 | return VINF_SUCCESS;
|
---|
1036 | }
|
---|
1037 |
|
---|
1038 | rc = VERR_NO_MEMORY;
|
---|
1039 | MmUnmapLockedPages(pv, pMdl);
|
---|
1040 | }
|
---|
1041 | }
|
---|
1042 | __except(EXCEPTION_EXECUTE_HANDLER)
|
---|
1043 | {
|
---|
1044 | #ifdef LOG_ENABLED
|
---|
1045 | NTSTATUS rcNt = GetExceptionCode();
|
---|
1046 | Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
|
---|
1047 | #endif
|
---|
1048 |
|
---|
1049 | /* nothing */
|
---|
1050 | rc = VERR_MAP_FAILED;
|
---|
1051 | }
|
---|
1052 |
|
---|
1053 | }
|
---|
1054 | else
|
---|
1055 | {
|
---|
1056 | AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
|
---|
1057 | && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
|
---|
1058 |
|
---|
1059 | /* cannot map phys mem to user space (yet). */
|
---|
1060 | if (R0Process != NIL_RTR0PROCESS)
|
---|
1061 | return VERR_NOT_SUPPORTED;
|
---|
1062 |
|
---|
1063 | /* Cannot sub-mak these (yet). */
|
---|
1064 | AssertMsgReturn(!offSub && !cbSub, ("%#zx %#zx\n", offSub, cbSub), VERR_NOT_SUPPORTED);
|
---|
1065 |
|
---|
1066 |
|
---|
1067 | /** @todo uAlignment */
|
---|
1068 | /** @todo How to set the protection on the pages? */
|
---|
1069 | PHYSICAL_ADDRESS Phys;
|
---|
1070 | Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
|
---|
1071 | void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
|
---|
1072 | pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
|
---|
1073 | if (pv)
|
---|
1074 | {
|
---|
1075 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
|
---|
1076 | pMemNtToMap->Core.cb, pszTag);
|
---|
1077 | if (pMemNt)
|
---|
1078 | {
|
---|
1079 | pMemNt->Core.u.Mapping.R0Process = R0Process;
|
---|
1080 | *ppMem = &pMemNt->Core;
|
---|
1081 | return VINF_SUCCESS;
|
---|
1082 | }
|
---|
1083 |
|
---|
1084 | rc = VERR_NO_MEMORY;
|
---|
1085 | MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
|
---|
1086 | }
|
---|
1087 | }
|
---|
1088 |
|
---|
1089 | NOREF(uAlignment); NOREF(fProt);
|
---|
1090 | return rc;
|
---|
1091 | }
|
---|
1092 |
|
---|
1093 |
|
---|
1094 | DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
|
---|
1095 | unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
|
---|
1096 | {
|
---|
1097 | return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS, offSub, cbSub, pszTag);
|
---|
1098 | }
|
---|
1099 |
|
---|
1100 |
|
---|
1101 | DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
|
---|
1102 | unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
|
---|
1103 | {
|
---|
1104 | AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
|
---|
1105 | return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub, pszTag);
|
---|
1106 | }
|
---|
1107 |
|
---|
1108 |
|
---|
1109 | DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
|
---|
1110 | {
|
---|
1111 | #if 0
|
---|
1112 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
|
---|
1113 | #endif
|
---|
1114 |
|
---|
1115 | /*
|
---|
1116 | * Seems there are some issues with this MmProtectMdlSystemAddress API, so
|
---|
1117 | * this code isn't currently enabled until we've tested it with the verifier.
|
---|
1118 | */
|
---|
1119 | #if 0
|
---|
1120 | /*
|
---|
1121 | * The API we've got requires a kernel mapping.
|
---|
1122 | */
|
---|
1123 | if ( pMemNt->cMdls
|
---|
1124 | && g_pfnrtMmProtectMdlSystemAddress
|
---|
1125 | && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
|
---|
1126 | && pMemNt->Core.pv != NULL
|
---|
1127 | && ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
|
---|
1128 | || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
|
---|
1129 | || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
|
---|
1130 | || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
|
---|
1131 | && pMemNt->Core.u.Lock.R0Process == NIL_RTPROCESS)
|
---|
1132 | || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
|
---|
1133 | && pMemNt->Core.u.Mapping.R0Process == NIL_RTPROCESS) ) )
|
---|
1134 | {
|
---|
1135 | /* Convert the protection. */
|
---|
1136 | LOCK_OPERATION enmLockOp;
|
---|
1137 | ULONG fAccess;
|
---|
1138 | switch (fProt)
|
---|
1139 | {
|
---|
1140 | case RTMEM_PROT_NONE:
|
---|
1141 | fAccess = PAGE_NOACCESS;
|
---|
1142 | enmLockOp = IoReadAccess;
|
---|
1143 | break;
|
---|
1144 | case RTMEM_PROT_READ:
|
---|
1145 | fAccess = PAGE_READONLY;
|
---|
1146 | enmLockOp = IoReadAccess;
|
---|
1147 | break;
|
---|
1148 | case RTMEM_PROT_WRITE:
|
---|
1149 | case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
|
---|
1150 | fAccess = PAGE_READWRITE;
|
---|
1151 | enmLockOp = IoModifyAccess;
|
---|
1152 | break;
|
---|
1153 | case RTMEM_PROT_EXEC:
|
---|
1154 | fAccess = PAGE_EXECUTE;
|
---|
1155 | enmLockOp = IoReadAccess;
|
---|
1156 | break;
|
---|
1157 | case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
|
---|
1158 | fAccess = PAGE_EXECUTE_READ;
|
---|
1159 | enmLockOp = IoReadAccess;
|
---|
1160 | break;
|
---|
1161 | case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
|
---|
1162 | case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
|
---|
1163 | fAccess = PAGE_EXECUTE_READWRITE;
|
---|
1164 | enmLockOp = IoModifyAccess;
|
---|
1165 | break;
|
---|
1166 | default:
|
---|
1167 | AssertFailedReturn(VERR_INVALID_FLAGS);
|
---|
1168 | }
|
---|
1169 |
|
---|
1170 | NTSTATUS rcNt = STATUS_SUCCESS;
|
---|
1171 | # if 0 /** @todo test this against the verifier. */
|
---|
1172 | if (offSub == 0 && pMemNt->Core.cb == cbSub)
|
---|
1173 | {
|
---|
1174 | uint32_t iMdl = pMemNt->cMdls;
|
---|
1175 | while (iMdl-- > 0)
|
---|
1176 | {
|
---|
1177 | rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[i], fAccess);
|
---|
1178 | if (!NT_SUCCESS(rcNt))
|
---|
1179 | break;
|
---|
1180 | }
|
---|
1181 | }
|
---|
1182 | else
|
---|
1183 | # endif
|
---|
1184 | {
|
---|
1185 | /*
|
---|
1186 | * We ASSUME the following here:
|
---|
1187 | * - MmProtectMdlSystemAddress can deal with nonpaged pool memory
|
---|
1188 | * - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
|
---|
1189 | * - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
|
---|
1190 | * exact same ranges prior to freeing them.
|
---|
1191 | *
|
---|
1192 | * So, we lock the pages temporarily, call the API and unlock them.
|
---|
1193 | */
|
---|
1194 | uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
|
---|
1195 | while (cbSub > 0 && NT_SUCCESS(rcNt))
|
---|
1196 | {
|
---|
1197 | size_t cbCur = cbSub;
|
---|
1198 | if (cbCur > MAX_LOCK_MEM_SIZE)
|
---|
1199 | cbCur = MAX_LOCK_MEM_SIZE;
|
---|
1200 | PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
|
---|
1201 | if (pMdl)
|
---|
1202 | {
|
---|
1203 | __try
|
---|
1204 | {
|
---|
1205 | MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
|
---|
1206 | }
|
---|
1207 | __except(EXCEPTION_EXECUTE_HANDLER)
|
---|
1208 | {
|
---|
1209 | rcNt = GetExceptionCode();
|
---|
1210 | }
|
---|
1211 | if (NT_SUCCESS(rcNt))
|
---|
1212 | {
|
---|
1213 | rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
|
---|
1214 | MmUnlockPages(pMdl);
|
---|
1215 | }
|
---|
1216 | IoFreeMdl(pMdl);
|
---|
1217 | }
|
---|
1218 | else
|
---|
1219 | rcNt = STATUS_NO_MEMORY;
|
---|
1220 | pbCur += cbCur;
|
---|
1221 | cbSub -= cbCur;
|
---|
1222 | }
|
---|
1223 | }
|
---|
1224 |
|
---|
1225 | if (NT_SUCCESS(rcNt))
|
---|
1226 | return VINF_SUCCESS;
|
---|
1227 | return RTErrConvertFromNtStatus(rcNt);
|
---|
1228 | }
|
---|
1229 | #else
|
---|
1230 | RT_NOREF4(pMem, offSub, cbSub, fProt);
|
---|
1231 | #endif
|
---|
1232 |
|
---|
1233 | return VERR_NOT_SUPPORTED;
|
---|
1234 | }
|
---|
1235 |
|
---|
1236 |
|
---|
1237 | DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
|
---|
1238 | {
|
---|
1239 | PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
|
---|
1240 |
|
---|
1241 | if (pMemNt->cMdls)
|
---|
1242 | {
|
---|
1243 | if (pMemNt->cMdls == 1)
|
---|
1244 | {
|
---|
1245 | PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
|
---|
1246 | return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
|
---|
1247 | }
|
---|
1248 |
|
---|
1249 | size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
|
---|
1250 | size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
|
---|
1251 | PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
|
---|
1252 | return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
|
---|
1253 | }
|
---|
1254 |
|
---|
1255 | switch (pMemNt->Core.enmType)
|
---|
1256 | {
|
---|
1257 | case RTR0MEMOBJTYPE_MAPPING:
|
---|
1258 | return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
|
---|
1259 |
|
---|
1260 | case RTR0MEMOBJTYPE_PHYS:
|
---|
1261 | return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
|
---|
1262 |
|
---|
1263 | case RTR0MEMOBJTYPE_PAGE:
|
---|
1264 | case RTR0MEMOBJTYPE_PHYS_NC:
|
---|
1265 | case RTR0MEMOBJTYPE_LOW:
|
---|
1266 | case RTR0MEMOBJTYPE_CONT:
|
---|
1267 | case RTR0MEMOBJTYPE_LOCK:
|
---|
1268 | default:
|
---|
1269 | AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
|
---|
1270 | case RTR0MEMOBJTYPE_RES_VIRT:
|
---|
1271 | return NIL_RTHCPHYS;
|
---|
1272 | }
|
---|
1273 | }
|
---|
1274 |
|
---|