/* $Id: memobj-r0drv-nt.cpp 91482 2021-09-30 00:12:26Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, NT.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/process.h>
#include "internal/memobj.h"
#include "internal-r0drv-nt.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * This is supposed to have a limit right below 256MB, but it appears
 * to actually be much lower.  The values here have been determined experimentally.
 */
#ifdef RT_ARCH_X86
# define MAX_LOCK_MEM_SIZE   (32*1024*1024) /* 32MB */
#endif
#ifdef RT_ARCH_AMD64
# define MAX_LOCK_MEM_SIZE   (24*1024*1024) /* 24MB */
#endif
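/* Illustrative example (not from the original sources) of how this limit plays
   out in rtR0MemObjNtLock() below: locking a 100 MB user buffer on AMD64 with
   24 MB chunks needs ceil(100/24) = 5 MDLs, the first four covering 24 MB each
   and the last one the remaining 4 MB. */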

/* Newer WDK constants: */
#ifndef MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS
# define MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS  0x20
#endif
#ifndef MM_ALLOCATE_FAST_LARGE_PAGES
# define MM_ALLOCATE_FAST_LARGE_PAGES           0x40
#endif


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The NT version of the memory object structure.
 */
typedef struct RTR0MEMOBJNT
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Set if MmAllocatePagesForMdl() was used to allocate the pages. */
    bool                fAllocatedPagesForMdl;
    /** Set if this is a sub-section of the parent. */
    bool                fSubMapping;
    /** Pointer returned by MmSecureVirtualMemory. */
    PVOID               pvSecureMem;
    /** The number of PMDLs (memory descriptor lists) in the array. */
    uint32_t            cMdls;
    /** Array of MDL pointers. (variable size) */
    PMDL                apMdls[1];
} RTR0MEMOBJNT;
/** Pointer to the NT version of the memory object structure. */
typedef RTR0MEMOBJNT *PRTR0MEMOBJNT;
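/* Note: the structure ends in a variable-length apMdls[] array; objects that need
   more than one MDL (see rtR0MemObjNtLock() below) are sized with
   RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]). */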



DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }

                g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            Assert(pMemNt->Core.pv);
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                Assert(pMemNt->pvSecureMem == NULL);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
            }
            else
            {
                if (g_pfnrtExFreePoolWithTag)
                    g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
                else
                    ExFreePool(pMemNt->Core.pv);

                Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                IoFreeMdl(pMemNt->apMdls[0]);
            }
            pMemNt->Core.pv = NULL;
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            /* rtR0MemObjNativeEnterPhys? */
            if (!pMemNt->Core.u.Phys.fAllocated)
            {
                Assert(!pMemNt->fAllocatedPagesForMdl);
                /* Nothing to do here. */
                break;
            }
            RT_FALL_THRU();

        case RTR0MEMOBJTYPE_PHYS_NC:
            if (pMemNt->fAllocatedPagesForMdl)
            {
                g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNt->pvSecureMem)
            {
                g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
            /*if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            Assert(pMemNt->Core.pv);
            Assert((pMemNt->cMdls == 0 && !pMemNt->fSubMapping) || (pMemNt->cMdls == 1 && pMemNt->fSubMapping));
            if (pMemNtParent->cMdls)
            {
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(   pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                       || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                if (!pMemNt->cMdls)
                    MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
                else
                {
                    MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                    IoFreeMdl(pMemNt->apMdls[0]);
                    pMemNt->apMdls[0] = NULL;
                }
            }
            else
            {
                Assert(   pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       && !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                Assert(!pMemNt->fSubMapping);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
    RT_NOREF1(fExecutable);

    /*
     * Use MmAllocatePagesForMdl if the allocation is a little bit big.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    if (   cb > _1M
        && g_pfnrtMmAllocatePagesForMdl
        && g_pfnrtMmFreePagesFromMdl
        && g_pfnrtMmMapLockedPagesSpecifyCache)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = MAXLONGLONG;
        PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                __try
                {
                    void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                                   FALSE /* no bug check on failure */, NormalPagePriority);
                    if (pv)
                    {
#ifdef RT_ARCH_AMD64
                        if (fExecutable)
                            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

                        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, NULL);
                        if (pMemNt)
                        {
                            pMemNt->fAllocatedPagesForMdl = true;
                            pMemNt->cMdls = 1;
                            pMemNt->apMdls[0] = pMdl;
                            *ppMem = &pMemNt->Core;
                            return VINF_SUCCESS;
                        }
                        MmUnmapLockedPages(pv, pMdl);
                    }
                }
                __except(EXCEPTION_EXECUTE_HANDLER)
                {
#ifdef LOG_ENABLED
                    NTSTATUS rcNt = GetExceptionCode();
                    Log(("rtR0MemObjNativeAllocPage: Exception Code %#x\n", rcNt));
#endif
                    /* nothing */
                }
            }
            g_pfnrtMmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }

    /*
     * Try to allocate the memory and create an MDL for it so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    void *pv;
    if (g_pfnrtExAllocatePoolWithTag)
        pv = g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    else
        pv = ExAllocatePool(NonPagedPool, cb);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            if (fExecutable)
                MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, NULL);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}


/**
 * Helper for rtR0MemObjNativeAllocLarge that verifies the result.
 */
static bool rtR0MemObjNtVerifyLargePageAlloc(PMDL pMdl, size_t cb, size_t cbLargePage)
{
    if (MmGetMdlByteCount(pMdl) >= cb)
    {
        PPFN_NUMBER const paPfns             = MmGetMdlPfnArray(pMdl);
        size_t const      cPagesPerLargePage = cbLargePage >> PAGE_SHIFT;
        size_t const      cLargePages        = cb / cbLargePage;
        size_t            iPage              = 0;
        for (size_t iLargePage = 0; iLargePage < cLargePages; iLargePage++)
        {
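            /* Each pass checks one cbLargePage sized chunk: its first PFN must be a
               whole multiple of the pages-per-large-page count (natural alignment),
               and the remaining PFNs of the chunk must follow it consecutively. */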
            PFN_NUMBER Pfn = paPfns[iPage];
            if (!(Pfn & ((cbLargePage >> PAGE_SHIFT) - 1U)))
            {
                for (size_t iSubPage = 1; iSubPage < cPagesPerLargePage; iSubPage++)
                {
                    iPage++;
                    Pfn++;
                    if (paPfns[iPage] == Pfn)
                    { /* likely */ }
                    else
                    {
                        Log(("rtR0MemObjNativeAllocLarge: Subpage %#zu in large page #%zu is not contiguous: %#x, expected %#x\n",
                             iSubPage, iLargePage, paPfns[iPage], Pfn));
                        return false;
                    }
                }
                iPage++; /* Advance to the first page of the next large page. */
            }
            else
            {
                Log(("rtR0MemObjNativeAllocLarge: Large page #%zu is misaligned: %#x, cbLargePage=%#zx\n",
                     iLargePage, Pfn, cbLargePage));
                return false;
            }
        }
        return true;
    }
    Log(("rtR0MemObjNativeAllocLarge: Got back too few pages: %#zx, requested %#zx\n", MmGetMdlByteCount(pMdl), cb));
    return false;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                           const char *pszTag)
{
    /*
     * Need the MmAllocatePagesForMdlEx function so we can specify flags.
     */
    if (   g_uRtNtVersion >= RTNT_MAKE_VERSION(6,1) /* Windows 7+ */
        && g_pfnrtMmAllocatePagesForMdlEx
        && g_pfnrtMmFreePagesFromMdl
        && g_pfnrtMmMapLockedPagesSpecifyCache)
    {
        ULONG fNtFlags = MM_ALLOCATE_FULLY_REQUIRED             /* W7+: Make it fail if we don't get all we ask for. */
                       | MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS; /* W7+: The SkipBytes chunks must be physically contiguous. */
        if ((fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST) && g_uRtNtVersion >= RTNT_MAKE_VERSION(6, 2))
            fNtFlags |= MM_ALLOCATE_FAST_LARGE_PAGES;            /* W8+: Don't try too hard, just fail if not enough handy. */

        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;

        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = MAXLONGLONG;

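        /* SkipBytes doubles as the chunk size here: together with
           MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS above, every cbLargePage sized chunk
           of the allocation is physically contiguous, which is what
           rtR0MemObjNtVerifyLargePageAlloc() checks afterwards. */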
        PHYSICAL_ADDRESS Skip;
        Skip.QuadPart = cbLargePage;

        int rc;
        PMDL const pMdl = g_pfnrtMmAllocatePagesForMdlEx(Zero, HighAddr, Skip, cb, MmCached, fNtFlags);
        if (pMdl)
        {
            /* Verify the result. */
            if (rtR0MemObjNtVerifyLargePageAlloc(pMdl, cb, cbLargePage))
            {
                /*
                 * Map the allocation into kernel space.  Unless the memory is already
                 * mapped somewhere (it actually seems to be), it is unlikely that we
                 * will get a large page aligned mapping back here...
                 */
                __try
                {
                    void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                                   FALSE /* no bug check on failure */, NormalPagePriority);
                    if (pv)
                    {
                        /*
                         * Create the memory object.
                         */
                        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
                        if (pMemNt)
                        {
                            pMemNt->fAllocatedPagesForMdl = true;
                            pMemNt->cMdls = 1;
                            pMemNt->apMdls[0] = pMdl;
                            *ppMem = &pMemNt->Core;
                            return VINF_SUCCESS;
                        }

                        MmUnmapLockedPages(pv, pMdl);
                    }
                }
                __except(EXCEPTION_EXECUTE_HANDLER)
                {
#ifdef LOG_ENABLED
                    NTSTATUS rcNt = GetExceptionCode();
                    Log(("rtR0MemObjNativeAllocLarge: Exception Code %#x\n", rcNt));
#endif
                    /* nothing */
                }
            }

            g_pfnrtMmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
            rc = VERR_NO_MEMORY;
        }
        else
            rc = fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST ? VERR_TRY_AGAIN : VERR_NO_MEMORY;
        return rc;
    }

    return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try and see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_LOW_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    if (   g_pfnrtMmAllocatePagesForMdl
        && g_pfnrtMmFreePagesFromMdl
        && g_pfnrtMmMapLockedPagesSpecifyCache)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = _4G - 1;
        PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                __try
                {
                    void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                                   FALSE /* no bug check on failure */, NormalPagePriority);
                    if (pv)
                    {
                        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb, NULL);
                        if (pMemNt)
                        {
                            pMemNt->fAllocatedPagesForMdl = true;
                            pMemNt->cMdls = 1;
                            pMemNt->apMdls[0] = pMdl;
                            *ppMem = &pMemNt->Core;
                            return VINF_SUCCESS;
                        }
                        MmUnmapLockedPages(pv, pMdl);
                    }
                }
                __except(EXCEPTION_EXECUTE_HANDLER)
                {
#ifdef LOG_ENABLED
                    NTSTATUS rcNt = GetExceptionCode();
                    Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
#endif
                    /* nothing */
                }
            }
            g_pfnrtMmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}


/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
 * @param   cb              The size.
 * @param   fExecutable     Whether the mapping should be executable or not.
 * @param   PhysHighest     The highest physical address for the pages in the allocation.
 * @param   uAlignment      The alignment of the physical memory to allocate.
 *                          Supported values are PAGE_SIZE, _2M, _4M and _1G.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
                                       size_t uAlignment)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
    RT_NOREF1(fExecutable);

    /*
     * Allocate the memory and create an MDL for it.
     */
    PHYSICAL_ADDRESS PhysAddrHighest;
    PhysAddrHighest.QuadPart = PhysHighest;
    void *pv;
    if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
    {
        PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
        PhysAddrLowest.QuadPart = 0;
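        /* Note: uAlignment is passed as the BoundaryAddressMultiple parameter below,
           i.e. the contiguous block is not allowed to cross a uAlignment-sized
           physical address boundary (0 means no boundary restriction). */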
        PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
        pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
    }
    else if (uAlignment == PAGE_SIZE)
        pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
    else
        return VERR_NOT_SUPPORTED;
    if (!pv)
        return VERR_NO_MEMORY;

    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
    if (pMdl)
    {
        MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
        if (fExecutable)
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb, NULL);
        if (pMemNt)
        {
            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
            pMemNt->cMdls = 1;
            pMemNt->apMdls[0] = pMdl;
            *ppMem = &pMemNt->Core;
            return VINF_SUCCESS;
        }

        IoFreeMdl(pMdl);
    }
    MmFreeContiguousMemory(pv);
    return VERR_NO_MEMORY;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
                                          const char *pszTag)
{
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory.  Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good.  The
     * current limit is kind of random...
     */
    if (   cb < _128K
        && uAlignment == PAGE_SIZE
        && g_pfnrtMmAllocatePagesForMdl
        && g_pfnrtMmFreePagesFromMdl)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
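                /* MmAllocatePagesForMdl makes no contiguity promises, so only use the
                   result as a RTR0MEMOBJTYPE_PHYS object if the PFNs happen to be
                   consecutive; otherwise free it and fall back to the contiguous
                   allocator below. */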
                PFN_NUMBER Pfn = paPfns[0] + 1;
                const size_t cPages = cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
                    if (paPfns[iPage] != Pfn)
                        break;
                if (iPage >= cPages)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
                    if (pMemNt)
                    {
                        pMemNt->Core.u.Phys.fAllocated = true;
                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                }
            }
            g_pfnrtMmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }

    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
    if (g_pfnrtMmAllocatePagesForMdl && g_pfnrtMmFreePagesFromMdl)
    {
        /** @todo use the Ex version with the fail-if-not-all-requested-pages flag
         *        when possible. */
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
                if (pMemNt)
                {
                    pMemNt->fAllocatedPagesForMdl = true;
                    pMemNt->cMdls = 1;
                    pMemNt->apMdls[0] = pMdl;
                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }
            }
            g_pfnrtMmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
        return VERR_NO_MEMORY;
    }
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
                                          const char *pszTag)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);

    /*
     * Validate the address range and create a descriptor for it.
     */
    PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
        return VERR_ADDRESS_TOO_BIG;

    /*
     * Create the IPRT memory object.
     */
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
    if (pMemNt)
    {
        pMemNt->Core.u.Phys.PhysBase = Phys;
        pMemNt->Core.u.Phys.fAllocated = false;
        pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
        *ppMem = &pMemNt->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}


/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param   ppMem           Where to store the memory object pointer.
 * @param   pv              First page.
 * @param   cb              Number of bytes.
 * @param   fAccess         The desired access, a combination of RTMEM_PROT_READ
 *                          and RTMEM_PROT_WRITE.
 * @param   R0Process       The process \a pv and \a cb refer to.
 * @param   pszTag          Allocation tag used for statistics and such.
 */
static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process,
                            const char *pszTag)
{
    /*
     * Calc the number of MDLs we need and allocate the memory object structure.
     */
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
    if (cb % MAX_LOCK_MEM_SIZE)
        cMdls++;
    if (cMdls >= UINT32_MAX)
        return VERR_OUT_OF_RANGE;
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
                                                        RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
    if (!pMemNt)
        return VERR_NO_MEMORY;

    /*
     * Loop locking down the sub parts of the memory.
     */
    int rc = VINF_SUCCESS;
    size_t cbTotal = 0;
    uint8_t *pb = (uint8_t *)pv;
    uint32_t iMdl;
    for (iMdl = 0; iMdl < cMdls; iMdl++)
    {
        /*
         * Calc the Mdl size and allocate it.
         */
        size_t cbCur = cb - cbTotal;
        if (cbCur > MAX_LOCK_MEM_SIZE)
            cbCur = MAX_LOCK_MEM_SIZE;
        AssertMsg(cbCur, ("cbCur: 0!\n"));
        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
        if (!pMdl)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /*
         * Lock the pages.
         */
        __try
        {
            MmProbeAndLockPages(pMdl,
                                R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                fAccess == RTMEM_PROT_READ
                                ? IoReadAccess
                                : fAccess == RTMEM_PROT_WRITE
                                ? IoWriteAccess
                                : IoModifyAccess);

            pMemNt->apMdls[iMdl] = pMdl;
            pMemNt->cMdls++;
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            IoFreeMdl(pMdl);
            rc = VERR_LOCK_FAILED;
            break;
        }

        if (   R0Process != NIL_RTR0PROCESS
            && g_pfnrtMmSecureVirtualMemory
            && g_pfnrtMmUnsecureVirtualMemory)
        {
            /* Make sure the user process can't change the allocation. */
            pMemNt->pvSecureMem = g_pfnrtMmSecureVirtualMemory(pv, cb,
                                                               fAccess & RTMEM_PROT_WRITE
                                                               ? PAGE_READWRITE
                                                               : PAGE_READONLY);
            if (!pMemNt->pvSecureMem)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        /* next */
        cbTotal += cbCur;
        pb += cbCur;
    }
    if (RT_SUCCESS(rc))
    {
        Assert(pMemNt->cMdls == cMdls);
        pMemNt->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemNt->Core;
        return rc;
    }

    /*
     * We failed, perform cleanups.
     */
    while (iMdl-- > 0)
    {
        MmUnlockPages(pMemNt->apMdls[iMdl]);
        IoFreeMdl(pMemNt->apMdls[iMdl]);
        pMemNt->apMdls[iMdl] = NULL;
    }
    if (pMemNt->pvSecureMem)
    {
        if (g_pfnrtMmUnsecureVirtualMemory)
            g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
        pMemNt->pvSecureMem = NULL;
    }

    rtR0MemObjDelete(&pMemNt->Core);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process, const char *pszTag)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
{
    return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
                                              const char *pszTag)
{
    /*
     * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
     * Or MmAllocateMappingAddress?
     */
    RT_NOREF(ppMem, pvFixed, cb, uAlignment, pszTag);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process, const char *pszTag)
{
    /*
     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
     */
    RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
    return VERR_NOT_SUPPORTED;
}


/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object for the mapping.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Where to map it.  (void *)-1 if anywhere is fine.
 * @param   uAlignment  The alignment requirement for the mapping.
 * @param   fProt       The desired page protection for the mapping.
 * @param   R0Process   If NIL_RTR0PROCESS map into system (kernel) memory.
 *                      If not nil, it's the current process.
 * @param   offSub      Offset into @a pMemToMap to start mapping.
 * @param   cbSub       The number of bytes to map from @a pMemToMap.  0 if
 *                      we're to map everything.  Non-zero if @a offSub is
 *                      non-zero.
 * @param   pszTag      Allocation tag used for statistics and such.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                           unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
{
    int rc = VERR_MAP_FAILED;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * There are two basic cases here, either we've got an MDL and can
     * map it using MmMapLockedPages, or we've got a contiguous physical
     * range (MMIO most likely) and can use MmMapIoSpace.
     */
    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
    if (pMemNtToMap->cMdls)
    {
        /* don't attempt to map locked regions with more than one MDL. */
        if (pMemNtToMap->cMdls != 1)
            return VERR_NOT_SUPPORTED;

        /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
        if (pvFixed != (void *)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
            return VERR_NOT_SUPPORTED;

        /* we can't map anything to the first page, sorry. */
        if (pvFixed == 0)
            return VERR_NOT_SUPPORTED;

        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
        if (   pMemNtToMap->Core.uRel.Parent.cMappings
            && R0Process == NIL_RTR0PROCESS)
        {
            if (pMemNtToMap->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
                return VERR_NOT_SUPPORTED;
            uint32_t iMapping = pMemNtToMap->Core.uRel.Parent.cMappings;
            while (iMapping-- > 0)
            {
                PRTR0MEMOBJNT pMapping = (PRTR0MEMOBJNT)pMemNtToMap->Core.uRel.Parent.papMappings[iMapping];
                if (   pMapping->Core.enmType != RTR0MEMOBJTYPE_MAPPING
                    || pMapping->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
                    return VERR_NOT_SUPPORTED;
            }
        }

        /* Create a partial MDL if this is a sub-range request. */
        PMDL pMdl;
        if (!offSub && !cbSub)
            pMdl = pMemNtToMap->apMdls[0];
        else
        {
            pMdl = IoAllocateMdl(NULL, (ULONG)cbSub, FALSE, FALSE, NULL);
            if (pMdl)
                IoBuildPartialMdl(pMemNtToMap->apMdls[0], pMdl,
                                  (uint8_t *)MmGetMdlVirtualAddress(pMemNtToMap->apMdls[0]) + offSub, (ULONG)cbSub);
            else
                return VERR_NO_MEMORY; /* IoAllocateMdl failed; there is no MDL to free here. */
        }

        __try
        {
            /** @todo uAlignment */
            /** @todo How to set the protection on the pages? */
            void *pv;
            if (g_pfnrtMmMapLockedPagesSpecifyCache)
                pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl,
                                                         R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                                         MmCached,
                                                         pvFixed != (void *)-1 ? pvFixed : NULL,
                                                         FALSE /* no bug check on failure */,
                                                         NormalPagePriority);
            else
                pv = MmMapLockedPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
            if (pv)
            {
                NOREF(fProt);

                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(  !offSub && !cbSub
                                                                    ? sizeof(*pMemNt) : RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[1]),
                                                                    RTR0MEMOBJTYPE_MAPPING, pv, pMemNtToMap->Core.cb, pszTag);
                if (pMemNt)
                {
                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                    if (!offSub && !cbSub)
                        pMemNt->fSubMapping = false;
                    else
                    {
                        pMemNt->apMdls[0] = pMdl;
                        pMemNt->cMdls = 1;
                        pMemNt->fSubMapping = true;
                    }

                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                MmUnmapLockedPages(pv, pMdl);
            }
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
#ifdef LOG_ENABLED
            NTSTATUS rcNt = GetExceptionCode();
            Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
#endif

            /* nothing */
            rc = VERR_MAP_FAILED;
        }

    }
    else
    {
        AssertReturn(   pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
                     && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);

        /* cannot map phys mem to user space (yet). */
        if (R0Process != NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        /* Cannot sub-map these (yet). */
        AssertMsgReturn(!offSub && !cbSub, ("%#zx %#zx\n", offSub, cbSub), VERR_NOT_SUPPORTED);


        /** @todo uAlignment */
        /** @todo How to set the protection on the pages? */
        PHYSICAL_ADDRESS Phys;
        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
                                pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
        if (pv)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                pMemNtToMap->Core.cb, pszTag);
            if (pMemNt)
            {
                pMemNt->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
        }
    }

    NOREF(uAlignment); NOREF(fProt);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS, offSub, cbSub, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
#if 0
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
#endif

    /*
     * Seems there are some issues with this MmProtectMdlSystemAddress API, so
     * this code isn't currently enabled until we've tested it with the verifier.
     */
#if 0
    /*
     * The API we've got requires a kernel mapping.
     */
    if (   pMemNt->cMdls
        && g_pfnrtMmProtectMdlSystemAddress
        && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
        && pMemNt->Core.pv != NULL
        && (   pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
            || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
            || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
            || (   pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
                && pMemNt->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            || (   pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
                && pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS) ) )
    {
        /* Convert the protection. */
        LOCK_OPERATION enmLockOp;
        ULONG fAccess;
        switch (fProt)
        {
            case RTMEM_PROT_NONE:
                fAccess = PAGE_NOACCESS;
                enmLockOp = IoReadAccess;
                break;
            case RTMEM_PROT_READ:
                fAccess = PAGE_READONLY;
                enmLockOp = IoReadAccess;
                break;
            case RTMEM_PROT_WRITE:
            case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
                fAccess = PAGE_READWRITE;
                enmLockOp = IoModifyAccess;
                break;
            case RTMEM_PROT_EXEC:
                fAccess = PAGE_EXECUTE;
                enmLockOp = IoReadAccess;
                break;
            case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
                fAccess = PAGE_EXECUTE_READ;
                enmLockOp = IoReadAccess;
                break;
            case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
            case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
                fAccess = PAGE_EXECUTE_READWRITE;
                enmLockOp = IoModifyAccess;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_FLAGS);
        }

        NTSTATUS rcNt = STATUS_SUCCESS;
# if 0 /** @todo test this against the verifier. */
        if (offSub == 0 && pMemNt->Core.cb == cbSub)
        {
            uint32_t iMdl = pMemNt->cMdls;
            while (iMdl-- > 0)
            {
                rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[iMdl], fAccess);
                if (!NT_SUCCESS(rcNt))
                    break;
            }
        }
        else
# endif
        {
            /*
             * We ASSUME the following here:
             *  - MmProtectMdlSystemAddress can deal with nonpaged pool memory
             *  - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
             *  - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
             *    exact same ranges prior to freeing them.
             *
             * So, we lock the pages temporarily, call the API and unlock them.
             */
            uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
            while (cbSub > 0 && NT_SUCCESS(rcNt))
            {
                size_t cbCur = cbSub;
                if (cbCur > MAX_LOCK_MEM_SIZE)
                    cbCur = MAX_LOCK_MEM_SIZE;
                PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
                if (pMdl)
                {
                    __try
                    {
                        MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
                    }
                    __except(EXCEPTION_EXECUTE_HANDLER)
                    {
                        rcNt = GetExceptionCode();
                    }
                    if (NT_SUCCESS(rcNt))
                    {
                        rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
                        MmUnlockPages(pMdl);
                    }
                    IoFreeMdl(pMdl);
                }
                else
                    rcNt = STATUS_NO_MEMORY;
                pbCur += cbCur;
                cbSub -= cbCur;
            }
        }

        if (NT_SUCCESS(rcNt))
            return VINF_SUCCESS;
        return RTErrConvertFromNtStatus(rcNt);
    }
#else
    RT_NOREF4(pMem, offSub, cbSub, fProt);
#endif

    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    if (pMemNt->cMdls)
    {
        if (pMemNt->cMdls == 1)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
        }

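        /* Multi-MDL object (created by rtR0MemObjNtLock): every MDL except the last
           covers exactly MAX_LOCK_MEM_SIZE bytes, so the page index splits cleanly
           into an MDL index and an index into that MDL's PFN array. */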
        size_t iMdl    = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
    }

    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_LOCK:
        default:
            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}
