VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c@33540

Last change on this file since 33540 was 33540, checked in by vboxsync, 14 years ago

*: spelling fixes, thanks Timeless!

1/* $Revision: 33540 $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Linux.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "the-linux-kernel.h"
32
33#include <iprt/memobj.h>
34#include <iprt/alloc.h>
35#include <iprt/assert.h>
36#include <iprt/log.h>
37#include <iprt/process.h>
38#include <iprt/string.h>
39#include "internal/memobj.h"
40
41
42/*******************************************************************************
43* Defined Constants And Macros *
44*******************************************************************************/
45/* early 2.6 kernels */
46#ifndef PAGE_SHARED_EXEC
47# define PAGE_SHARED_EXEC PAGE_SHARED
48#endif
49#ifndef PAGE_READONLY_EXEC
50# define PAGE_READONLY_EXEC PAGE_READONLY
51#endif
52
53/*
54 * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
55 * track_pfn_vma_new() is apparently not defined for non-RAM pages.
 56 * It should be safe to use vm_insert_page() on older kernels as well.
57 */
58#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
59# define VBOX_USE_INSERT_PAGE
60#endif
61#if defined(CONFIG_X86_PAE) \
62 && ( HAVE_26_STYLE_REMAP_PAGE_RANGE \
63 || (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)))
64# define VBOX_USE_PAE_HACK
65#endif
66
67
68/*******************************************************************************
69* Structures and Typedefs *
70*******************************************************************************/
71/**
 72 * The Linux version of the memory object structure.
73 */
74typedef struct RTR0MEMOBJLNX
75{
76 /** The core structure. */
77 RTR0MEMOBJINTERNAL Core;
78 /** Set if the allocation is contiguous.
79 * This means it has to be given back as one chunk. */
80 bool fContiguous;
81 /** Set if we've vmap'ed the memory into ring-0. */
82 bool fMappedToRing0;
 83 /** The number of pages in the apPages array. */
84 size_t cPages;
85 /** Array of struct page pointers. (variable size) */
86 struct page *apPages[1];
87} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;
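
The trailing one-element apPages[] array is the usual over-allocation idiom: later in the file the object is created with rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), ...), so the array really ends up cPages entries long. A minimal user-space sketch of the same idiom (the struct and names here are made up for illustration, not IPRT code):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct demo
{
    size_t cItems;
    int    aItems[1];   /* declared with one entry, really holds cItems entries */
};

int main(void)
{
    size_t cItems = 5;
    /* Over-allocate: header plus room for all cItems array entries. */
    struct demo *p = calloc(1, offsetof(struct demo, aItems) + cItems * sizeof(int));
    if (!p)
        return 1;
    p->cItems = cItems;
    for (size_t i = 0; i < cItems; i++)
        p->aItems[i] = (int)(i * 10);
    printf("%zu items, last = %d\n", p->cItems, p->aItems[p->cItems - 1]);
    free(p);
    return 0;
}

C99 code would normally declare a flexible array member (int aItems[];) for this; the [1] form is the older convention this file follows.
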
88
89
90static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx);
91
92
93/**
 94 * Helper that converts an RTR0PROCESS handle to a Linux task.
95 *
96 * @returns The corresponding Linux task.
97 * @param R0Process IPRT ring-0 process handle.
98 */
99struct task_struct *rtR0ProcessToLinuxTask(RTR0PROCESS R0Process)
100{
101 /** @todo fix rtR0ProcessToLinuxTask!! */
102 return R0Process == RTR0ProcHandleSelf() ? current : NULL;
103}
104
105
106/**
107 * Compute order. Some functions allocate 2^order pages.
108 *
109 * @returns order.
110 * @param cPages Number of pages.
111 */
112static int rtR0MemObjLinuxOrder(size_t cPages)
113{
114 int iOrder;
115 size_t cTmp;
116
117 for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
118 ;
119 if (cPages & ~((size_t)1 << iOrder))
120 ++iOrder;
121
122 return iOrder;
123}
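
To make the rounding concrete, here is a minimal user-space sketch of the same computation (the helper name is made up; only the loop mirrors rtR0MemObjLinuxOrder()). A count that is not a power of two rounds up, so 3 pages yields order 2, i.e. a 4-page block:

#include <stdio.h>
#include <stddef.h>

/* Same logic as rtR0MemObjLinuxOrder(): smallest order with 2^order >= cPages. */
static int order_for_pages(size_t cPages)
{
    int iOrder = 0;
    size_t cTmp = cPages;
    while (cTmp >>= 1)                       /* floor(log2(cPages)) */
        ++iOrder;
    if (cPages & ~((size_t)1 << iOrder))     /* any bits set below the top one? */
        ++iOrder;                            /* then round up */
    return iOrder;
}

int main(void)
{
    for (size_t cPages = 1; cPages <= 8; cPages++)
        printf("%zu pages -> order %d (block of %zu pages)\n",
               cPages, order_for_pages(cPages), (size_t)1 << order_for_pages(cPages));
    return 0;
}
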
124
125
126/**
127 * Converts from RTMEM_PROT_* to Linux PAGE_*.
128 *
129 * @returns Linux page protection constant.
130 * @param fProt The IPRT protection mask.
131 * @param fKernel Whether it applies to kernel or user space.
132 */
133static pgprot_t rtR0MemObjLinuxConvertProt(unsigned fProt, bool fKernel)
134{
135 switch (fProt)
136 {
137 default:
138 AssertMsgFailed(("%#x %d\n", fProt, fKernel));
139 case RTMEM_PROT_NONE:
140 return PAGE_NONE;
141
142 case RTMEM_PROT_READ:
143 return fKernel ? PAGE_KERNEL_RO : PAGE_READONLY;
144
145 case RTMEM_PROT_WRITE:
146 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
147 return fKernel ? PAGE_KERNEL : PAGE_SHARED;
148
149 case RTMEM_PROT_EXEC:
150 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
151#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
152 if (fKernel)
153 {
154 pgprot_t fPg = MY_PAGE_KERNEL_EXEC;
155 pgprot_val(fPg) &= ~_PAGE_RW;
156 return fPg;
157 }
158 return PAGE_READONLY_EXEC;
159#else
160 return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_READONLY_EXEC;
161#endif
162
163 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
164 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_READ:
165 return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_SHARED_EXEC;
166 }
167}
168
169
170/**
171 * Internal worker that allocates physical pages and creates the memory object for them.
172 *
173 * @returns IPRT status code.
174 * @param ppMemLnx Where to store the memory object pointer.
175 * @param enmType The object type.
176 * @param cb The number of bytes to allocate.
177 * @param uAlignment The alignment of the physical memory.
178 * Only valid if fContiguous == true, ignored otherwise.
 179 * @param fFlagsLnx The page allocation flags (GFPs).
180 * @param fContiguous Whether the allocation must be contiguous.
181 */
182static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb,
183 size_t uAlignment, unsigned fFlagsLnx, bool fContiguous)
184{
185 size_t iPage;
186 size_t const cPages = cb >> PAGE_SHIFT;
187 struct page *paPages;
188
189 /*
190 * Allocate a memory object structure that's large enough to contain
191 * the page pointer array.
192 */
193 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), enmType, NULL, cb);
194 if (!pMemLnx)
195 return VERR_NO_MEMORY;
196 pMemLnx->cPages = cPages;
197
198 /*
199 * Allocate the pages.
200 * For small allocations we'll try contiguous first and then fall back on page by page.
201 */
202#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
203 if ( fContiguous
204 || cb <= PAGE_SIZE * 2)
205 {
206# ifdef VBOX_USE_INSERT_PAGE
207 paPages = alloc_pages(fFlagsLnx | __GFP_COMP, rtR0MemObjLinuxOrder(cPages));
208# else
209 paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages));
210# endif
211 if (paPages)
212 {
213 fContiguous = true;
214 for (iPage = 0; iPage < cPages; iPage++)
215 pMemLnx->apPages[iPage] = &paPages[iPage];
216 }
217 else if (fContiguous)
218 {
219 rtR0MemObjDelete(&pMemLnx->Core);
220 return VERR_NO_MEMORY;
221 }
222 }
223
224 if (!fContiguous)
225 {
226 for (iPage = 0; iPage < cPages; iPage++)
227 {
228 pMemLnx->apPages[iPage] = alloc_page(fFlagsLnx);
229 if (RT_UNLIKELY(!pMemLnx->apPages[iPage]))
230 {
231 while (iPage-- > 0)
232 __free_page(pMemLnx->apPages[iPage]);
233 rtR0MemObjDelete(&pMemLnx->Core);
234 return VERR_NO_MEMORY;
235 }
236 }
237 }
238
239#else /* < 2.4.22 */
240 /** @todo figure out why we didn't allocate page-by-page on 2.4.21 and older... */
241 paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages));
242 if (!paPages)
243 {
244 rtR0MemObjDelete(&pMemLnx->Core);
245 return VERR_NO_MEMORY;
246 }
247 for (iPage = 0; iPage < cPages; iPage++)
248 {
249 pMemLnx->apPages[iPage] = &paPages[iPage];
250 MY_SET_PAGES_EXEC(pMemLnx->apPages[iPage], 1);
251 if (PageHighMem(pMemLnx->apPages[iPage]))
252 BUG();
253 }
254
255 fContiguous = true;
256#endif /* < 2.4.22 */
257 pMemLnx->fContiguous = fContiguous;
258
259 /*
260 * Reserve the pages.
261 */
262 for (iPage = 0; iPage < cPages; iPage++)
263 SetPageReserved(pMemLnx->apPages[iPage]);
264
265 /*
266 * Note that the physical address of memory allocated with alloc_pages(flags, order)
267 * is always 2^(PAGE_SHIFT+order)-aligned.
268 */
269 if ( fContiguous
270 && uAlignment > PAGE_SIZE)
271 {
272 /*
273 * Check for alignment constraints. The physical address of memory allocated with
274 * alloc_pages(flags, order) is always 2^(PAGE_SHIFT+order)-aligned.
275 */
276 if (RT_UNLIKELY(page_to_phys(pMemLnx->apPages[0]) & (uAlignment - 1)))
277 {
278 /*
279 * This should never happen!
280 */
281 printk("rtR0MemObjLinuxAllocPages(cb=0x%lx, uAlignment=0x%lx): alloc_pages(..., %d) returned physical memory at 0x%lx!\n",
282 (unsigned long)cb, (unsigned long)uAlignment, rtR0MemObjLinuxOrder(cPages), (unsigned long)page_to_phys(pMemLnx->apPages[0]));
283 rtR0MemObjLinuxFreePages(pMemLnx);
284 return VERR_NO_MEMORY;
285 }
286 }
287
288 *ppMemLnx = pMemLnx;
289 return VINF_SUCCESS;
290}
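
As a worked example of the alignment note above: with 4KB pages a 5-page request rounds up to order 3, so alloc_pages() hands back a physically contiguous block aligned on 2^(12+3) bytes, i.e. 32KB.
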
291
292
293/**
294 * Frees the physical pages allocated by the rtR0MemObjLinuxAllocPages() call.
295 *
296 * This method does NOT free the object.
297 *
298 * @param pMemLnx The object which physical pages should be freed.
299 */
300static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx)
301{
302 size_t iPage = pMemLnx->cPages;
303 if (iPage > 0)
304 {
305 /*
306 * Restore the page flags.
307 */
308 while (iPage-- > 0)
309 {
310 ClearPageReserved(pMemLnx->apPages[iPage]);
311#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
312#else
313 MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
314#endif
315 }
316
317 /*
318 * Free the pages.
319 */
320#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
321 if (!pMemLnx->fContiguous)
322 {
323 iPage = pMemLnx->cPages;
324 while (iPage-- > 0)
325 __free_page(pMemLnx->apPages[iPage]);
326 }
327 else
328#endif
329 __free_pages(pMemLnx->apPages[0], rtR0MemObjLinuxOrder(pMemLnx->cPages));
330
331 pMemLnx->cPages = 0;
332 }
333}
334
335
336/**
337 * Maps the allocation into ring-0.
338 *
 339 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJLNX::fMappedToRing0 members.
340 *
 341 * Contiguous mappings that aren't in 'high' memory will already be mapped into kernel
342 * space, so we'll use that mapping if possible. If execute access is required, we'll
343 * play safe and do our own mapping.
344 *
345 * @returns IPRT status code.
346 * @param pMemLnx The linux memory object to map.
347 * @param fExecutable Whether execute access is required.
348 */
349static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool fExecutable)
350{
351 int rc = VINF_SUCCESS;
352
353 /*
354 * Choose mapping strategy.
355 */
356 bool fMustMap = fExecutable
357 || !pMemLnx->fContiguous;
358 if (!fMustMap)
359 {
360 size_t iPage = pMemLnx->cPages;
361 while (iPage-- > 0)
362 if (PageHighMem(pMemLnx->apPages[iPage]))
363 {
364 fMustMap = true;
365 break;
366 }
367 }
368
369 Assert(!pMemLnx->Core.pv);
370 Assert(!pMemLnx->fMappedToRing0);
371
372 if (fMustMap)
373 {
374 /*
375 * Use vmap - 2.4.22 and later.
376 */
377#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
378 pgprot_t fPg;
379 pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;
380# ifdef _PAGE_NX
381 if (!fExecutable)
382 pgprot_val(fPg) |= _PAGE_NX;
383# endif
384
385# ifdef VM_MAP
386 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
387# else
388 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
389# endif
390 if (pMemLnx->Core.pv)
391 pMemLnx->fMappedToRing0 = true;
392 else
393 rc = VERR_MAP_FAILED;
394#else /* < 2.4.22 */
395 rc = VERR_NOT_SUPPORTED;
396#endif
397 }
398 else
399 {
400 /*
401 * Use the kernel RAM mapping.
402 */
403 pMemLnx->Core.pv = phys_to_virt(page_to_phys(pMemLnx->apPages[0]));
404 Assert(pMemLnx->Core.pv);
405 }
406
407 return rc;
408}
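
For context, vmap() is what turns a set of scattered struct page pointers into one contiguous kernel-virtual range, and vunmap() tears it down again. A minimal stand-alone module sketch of that pattern, assuming a 2.6-or-later kernel (the module name and globals are invented for the example):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct page *g_apPages[4];
static void        *g_pv;

static int __init vmap_demo_init(void)
{
    size_t i;
    for (i = 0; i < 4; i++)
    {
        g_apPages[i] = alloc_page(GFP_KERNEL);   /* four unrelated pages */
        if (!g_apPages[i])
            goto bail;
    }
    /* One contiguous kernel-virtual window over the four scattered pages. */
    g_pv = vmap(g_apPages, 4, VM_MAP, PAGE_KERNEL);
    if (!g_pv)
        goto bail;
    printk(KERN_INFO "vmap_demo: mapped 4 pages at %p\n", g_pv);
    return 0;

bail:
    while (i-- > 0)
        __free_page(g_apPages[i]);
    return -ENOMEM;
}

static void __exit vmap_demo_exit(void)
{
    size_t i;
    vunmap(g_pv);
    for (i = 0; i < 4; i++)
        __free_page(g_apPages[i]);
}

module_init(vmap_demo_init);
module_exit(vmap_demo_exit);
MODULE_LICENSE("GPL");
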
409
410
411/**
 412 * Undoes what rtR0MemObjLinuxVMap() did.
413 *
414 * @param pMemLnx The linux memory object.
415 */
416static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
417{
418#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
419 if (pMemLnx->fMappedToRing0)
420 {
421 Assert(pMemLnx->Core.pv);
422 vunmap(pMemLnx->Core.pv);
423 pMemLnx->fMappedToRing0 = false;
424 }
425#else /* < 2.4.22 */
426 Assert(!pMemLnx->fMappedToRing0);
427#endif
428 pMemLnx->Core.pv = NULL;
429}
430
431
432int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
433{
434 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
435
436 /*
437 * Release any memory that we've allocated or locked.
438 */
439 switch (pMemLnx->Core.enmType)
440 {
441 case RTR0MEMOBJTYPE_LOW:
442 case RTR0MEMOBJTYPE_PAGE:
443 case RTR0MEMOBJTYPE_CONT:
444 case RTR0MEMOBJTYPE_PHYS:
445 case RTR0MEMOBJTYPE_PHYS_NC:
446 rtR0MemObjLinuxVUnmap(pMemLnx);
447 rtR0MemObjLinuxFreePages(pMemLnx);
448 break;
449
450 case RTR0MEMOBJTYPE_LOCK:
451 if (pMemLnx->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
452 {
453 struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
454 size_t iPage;
455 Assert(pTask);
456 if (pTask && pTask->mm)
457 down_read(&pTask->mm->mmap_sem);
458
459 iPage = pMemLnx->cPages;
460 while (iPage-- > 0)
461 {
462 if (!PageReserved(pMemLnx->apPages[iPage]))
463 SetPageDirty(pMemLnx->apPages[iPage]);
464 page_cache_release(pMemLnx->apPages[iPage]);
465 }
466
467 if (pTask && pTask->mm)
468 up_read(&pTask->mm->mmap_sem);
469 }
470 /* else: kernel memory - nothing to do here. */
471 break;
472
473 case RTR0MEMOBJTYPE_RES_VIRT:
474 Assert(pMemLnx->Core.pv);
475 if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
476 {
477 struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
478 Assert(pTask);
479 if (pTask && pTask->mm)
480 {
481 down_write(&pTask->mm->mmap_sem);
482 MY_DO_MUNMAP(pTask->mm, (unsigned long)pMemLnx->Core.pv, pMemLnx->Core.cb);
483 up_write(&pTask->mm->mmap_sem);
484 }
485 }
486 else
487 {
488 vunmap(pMemLnx->Core.pv);
489
490 Assert(pMemLnx->cPages == 1 && pMemLnx->apPages[0] != NULL);
491 __free_page(pMemLnx->apPages[0]);
492 pMemLnx->apPages[0] = NULL;
493 pMemLnx->cPages = 0;
494 }
495 pMemLnx->Core.pv = NULL;
496 break;
497
498 case RTR0MEMOBJTYPE_MAPPING:
499 Assert(pMemLnx->cPages == 0); Assert(pMemLnx->Core.pv);
500 if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
501 {
502 struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
503 Assert(pTask);
504 if (pTask && pTask->mm)
505 {
506 down_write(&pTask->mm->mmap_sem);
507 MY_DO_MUNMAP(pTask->mm, (unsigned long)pMemLnx->Core.pv, pMemLnx->Core.cb);
508 up_write(&pTask->mm->mmap_sem);
509 }
510 }
511 else
512 vunmap(pMemLnx->Core.pv);
513 pMemLnx->Core.pv = NULL;
514 break;
515
516 default:
517 AssertMsgFailed(("enmType=%d\n", pMemLnx->Core.enmType));
518 return VERR_INTERNAL_ERROR;
519 }
520 return VINF_SUCCESS;
521}
522
523
524int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
525{
526 PRTR0MEMOBJLNX pMemLnx;
527 int rc;
528
529#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
530 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_HIGHUSER, false /* non-contiguous */);
531#else
532 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_USER, false /* non-contiguous */);
533#endif
534 if (RT_SUCCESS(rc))
535 {
536 rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
537 if (RT_SUCCESS(rc))
538 {
539 *ppMem = &pMemLnx->Core;
540 return rc;
541 }
542
543 rtR0MemObjLinuxFreePages(pMemLnx);
544 rtR0MemObjDelete(&pMemLnx->Core);
545 }
546
547 return rc;
548}
549
550
551int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
552{
553 PRTR0MEMOBJLNX pMemLnx;
554 int rc;
555
 556 /* Try to avoid GFP_DMA. GFP_DMA32 was introduced with Linux 2.6.15. */
557#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
558 /* ZONE_DMA32: 0-4GB */
559 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA32, false /* non-contiguous */);
560 if (RT_FAILURE(rc))
561#endif
562#ifdef RT_ARCH_AMD64
563 /* ZONE_DMA: 0-16MB */
564 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA, false /* non-contiguous */);
565#else
566# ifdef CONFIG_X86_PAE
567# endif
568 /* ZONE_NORMAL: 0-896MB */
569 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_USER, false /* non-contiguous */);
570#endif
571 if (RT_SUCCESS(rc))
572 {
573 rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
574 if (RT_SUCCESS(rc))
575 {
576 *ppMem = &pMemLnx->Core;
577 return rc;
578 }
579
580 rtR0MemObjLinuxFreePages(pMemLnx);
581 rtR0MemObjDelete(&pMemLnx->Core);
582 }
583
584 return rc;
585}
586
587
588int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
589{
590 PRTR0MEMOBJLNX pMemLnx;
591 int rc;
592
593#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
594 /* ZONE_DMA32: 0-4GB */
595 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA32, true /* contiguous */);
596 if (RT_FAILURE(rc))
597#endif
598#ifdef RT_ARCH_AMD64
599 /* ZONE_DMA: 0-16MB */
600 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA, true /* contiguous */);
601#else
602 /* ZONE_NORMAL (32-bit hosts): 0-896MB */
603 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_USER, true /* contiguous */);
604#endif
605 if (RT_SUCCESS(rc))
606 {
607 rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
608 if (RT_SUCCESS(rc))
609 {
610#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(CONFIG_HIGHMEM64G))
611 size_t iPage = pMemLnx->cPages;
612 while (iPage-- > 0)
613 Assert(page_to_phys(pMemLnx->apPages[iPage]) < _4G);
614#endif
615 pMemLnx->Core.u.Cont.Phys = page_to_phys(pMemLnx->apPages[0]);
616 *ppMem = &pMemLnx->Core;
617 return rc;
618 }
619
620 rtR0MemObjLinuxFreePages(pMemLnx);
621 rtR0MemObjDelete(&pMemLnx->Core);
622 }
623
624 return rc;
625}
626
627
628/**
629 * Worker for rtR0MemObjLinuxAllocPhysSub that tries one allocation strategy.
630 *
631 * @returns IPRT status.
 632 * @param ppMem Where to store the memory object pointer on success.
633 * @param enmType The object type.
634 * @param cb The size of the allocation.
635 * @param uAlignment The alignment of the physical memory.
636 * Only valid for fContiguous == true, ignored otherwise.
637 * @param PhysHighest See rtR0MemObjNativeAllocPhys.
638 * @param fGfp The Linux GFP flags to use for the allocation.
639 */
640static int rtR0MemObjLinuxAllocPhysSub2(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
641 size_t cb, size_t uAlignment, RTHCPHYS PhysHighest, unsigned fGfp)
642{
643 PRTR0MEMOBJLNX pMemLnx;
644 int rc;
645
646 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, enmType, cb, uAlignment, fGfp,
647 enmType == RTR0MEMOBJTYPE_PHYS /* contiguous / non-contiguous */);
648 if (RT_FAILURE(rc))
649 return rc;
650
651 /*
652 * Check the addresses if necessary. (Can be optimized a bit for PHYS.)
653 */
654 if (PhysHighest != NIL_RTHCPHYS)
655 {
656 size_t iPage = pMemLnx->cPages;
657 while (iPage-- > 0)
658 if (page_to_phys(pMemLnx->apPages[iPage]) >= PhysHighest)
659 {
660 rtR0MemObjLinuxFreePages(pMemLnx);
661 rtR0MemObjDelete(&pMemLnx->Core);
662 return VERR_NO_MEMORY;
663 }
664 }
665
666 /*
667 * Complete the object.
668 */
669 if (enmType == RTR0MEMOBJTYPE_PHYS)
670 {
671 pMemLnx->Core.u.Phys.PhysBase = page_to_phys(pMemLnx->apPages[0]);
672 pMemLnx->Core.u.Phys.fAllocated = true;
673 }
674 *ppMem = &pMemLnx->Core;
675 return rc;
676}
677
678
679/**
680 * Worker for rtR0MemObjNativeAllocPhys and rtR0MemObjNativeAllocPhysNC.
681 *
682 * @returns IPRT status.
683 * @param ppMem Where to store the memory object pointer on success.
684 * @param enmType The object type.
685 * @param cb The size of the allocation.
686 * @param uAlignment The alignment of the physical memory.
687 * Only valid for enmType == RTR0MEMOBJTYPE_PHYS, ignored otherwise.
688 * @param PhysHighest See rtR0MemObjNativeAllocPhys.
689 */
690static int rtR0MemObjLinuxAllocPhysSub(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
691 size_t cb, size_t uAlignment, RTHCPHYS PhysHighest)
692{
693 int rc;
694
695 /*
 696 * There are two clear cases: the <=16MB one and the anything-goes one.
 697 * When the physical address limit is somewhere in between those two we'll
 698 * just have to try, starting with HIGHUSER and working our way through the
 699 * different zones, hoping we'll get lucky.
700 *
701 * We should probably move this physical address restriction logic up to
702 * the page alloc function as it would be more efficient there. But since
703 * we don't expect this to be a performance issue just yet it can wait.
704 */
705 if (PhysHighest == NIL_RTHCPHYS)
706 /* ZONE_HIGHMEM: the whole physical memory */
707 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_HIGHUSER);
708 else if (PhysHighest <= _1M * 16)
709 /* ZONE_DMA: 0-16MB */
710 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA);
711 else
712 {
713 rc = VERR_NO_MEMORY;
714 if (RT_FAILURE(rc))
715 /* ZONE_HIGHMEM: the whole physical memory */
716 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_HIGHUSER);
717 if (RT_FAILURE(rc))
718 /* ZONE_NORMAL: 0-896MB */
719 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_USER);
720#ifdef GFP_DMA32
721 if (RT_FAILURE(rc))
722 /* ZONE_DMA32: 0-4GB */
723 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA32);
724#endif
725 if (RT_FAILURE(rc))
726 /* ZONE_DMA: 0-16MB */
727 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, uAlignment, PhysHighest, GFP_DMA);
728 }
729 return rc;
730}
731
732
733int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
734{
735 return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS, cb, uAlignment, PhysHighest);
736}
737
738
739int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
740{
741 return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PAGE_SIZE, PhysHighest);
742}
743
744
745int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
746{
747 /*
748 * All we need to do here is to validate that we can use
749 * ioremap on the specified address (32/64-bit dma_addr_t).
750 */
751 PRTR0MEMOBJLNX pMemLnx;
752 dma_addr_t PhysAddr = Phys;
753 AssertMsgReturn(PhysAddr == Phys, ("%#llx\n", (unsigned long long)Phys), VERR_ADDRESS_TOO_BIG);
754
755 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_PHYS, NULL, cb);
756 if (!pMemLnx)
757 return VERR_NO_MEMORY;
758
759 pMemLnx->Core.u.Phys.PhysBase = PhysAddr;
760 pMemLnx->Core.u.Phys.fAllocated = false;
761 pMemLnx->Core.u.Phys.uCachePolicy = uCachePolicy;
762 Assert(!pMemLnx->cPages);
763 *ppMem = &pMemLnx->Core;
764 return VINF_SUCCESS;
765}
766
767
768int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
769{
770 const int cPages = cb >> PAGE_SHIFT;
771 struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
772 struct vm_area_struct **papVMAs;
773 PRTR0MEMOBJLNX pMemLnx;
774 int rc = VERR_NO_MEMORY;
775 NOREF(fAccess);
776
777 /*
778 * Check for valid task and size overflows.
779 */
780 if (!pTask)
781 return VERR_NOT_SUPPORTED;
782 if (((size_t)cPages << PAGE_SHIFT) != cb)
783 return VERR_OUT_OF_RANGE;
784
785 /*
786 * Allocate the memory object and a temporary buffer for the VMAs.
787 */
788 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
789 if (!pMemLnx)
790 return VERR_NO_MEMORY;
791
792 papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
793 if (papVMAs)
794 {
795 down_read(&pTask->mm->mmap_sem);
796
797 /*
798 * Get user pages.
799 */
800 rc = get_user_pages(pTask, /* Task for fault accounting. */
801 pTask->mm, /* Whose pages. */
802 R3Ptr, /* Where from. */
803 cPages, /* How many pages. */
804 1, /* Write to memory. */
805 0, /* force. */
806 &pMemLnx->apPages[0], /* Page array. */
807 papVMAs); /* vmas */
808 if (rc == cPages)
809 {
810 /*
811 * Flush dcache (required?), protect against fork and _really_ pin the page
812 * table entries. get_user_pages() will protect against swapping out the
813 * pages but it will NOT protect against removing page table entries. This
814 * can be achieved with
815 * - using mlock / mmap(..., MAP_LOCKED, ...) from userland. This requires
816 * an appropriate limit set up with setrlimit(..., RLIMIT_MEMLOCK, ...).
 817 * Typical Linux distributions permit only a limited amount of locked memory
818 * (e.g. 32KB).
 819 * - setting the PageReserved bit (as we do in rtR0MemObjLinuxAllocPages()),
820 * or by
821 * - setting the VM_LOCKED flag. This is the same as doing mlock() without
822 * a range check.
823 */
824 /** @todo The Linux fork() protection will require more work if this API
825 * is to be used for anything but locking VM pages. */
826 while (rc-- > 0)
827 {
828 flush_dcache_page(pMemLnx->apPages[rc]);
829 papVMAs[rc]->vm_flags |= (VM_DONTCOPY | VM_LOCKED);
830 }
831
832 up_read(&pTask->mm->mmap_sem);
833
834 RTMemFree(papVMAs);
835
836 pMemLnx->Core.u.Lock.R0Process = R0Process;
837 pMemLnx->cPages = cPages;
838 Assert(!pMemLnx->fMappedToRing0);
839 *ppMem = &pMemLnx->Core;
840
841 return VINF_SUCCESS;
842 }
843
844 /*
 845 * Failed - we need to unlock any pages that we managed to lock.
846 */
847 while (rc-- > 0)
848 {
849 if (!PageReserved(pMemLnx->apPages[rc]))
850 SetPageDirty(pMemLnx->apPages[rc]);
851 page_cache_release(pMemLnx->apPages[rc]);
852 }
853
854 up_read(&pTask->mm->mmap_sem);
855
856 RTMemFree(papVMAs);
857 rc = VERR_LOCK_FAILED;
858 }
859
860 rtR0MemObjDelete(&pMemLnx->Core);
861 return rc;
862}
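
The comment inside the function above mentions mlock()/RLIMIT_MEMLOCK as the ring-3 way of pinning pages. A minimal user-space sketch of that alternative (purely illustrative; raising the limit needs the appropriate privilege):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
    size_t        cb  = 16 * 4096;
    struct rlimit Lim = { RLIM_INFINITY, RLIM_INFINITY };
    void         *pv;

    /* Distributions often default RLIMIT_MEMLOCK to something tiny, so raise it
     * first (this fails without sufficient privilege). */
    if (setrlimit(RLIMIT_MEMLOCK, &Lim) != 0)
        perror("setrlimit(RLIMIT_MEMLOCK)");

    pv = malloc(cb);
    if (!pv)
        return 1;
    memset(pv, 0, cb);              /* touch the pages so they exist */

    if (mlock(pv, cb) == 0)         /* pin: pages stay resident until munlock/exit */
    {
        printf("locked %zu bytes at %p\n", cb, pv);
        munlock(pv, cb);
    }
    else
        perror("mlock");

    free(pv);
    return 0;
}
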
863
864
865int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
866{
867 void *pvLast = (uint8_t *)pv + cb - 1;
868 size_t const cPages = cb >> PAGE_SHIFT;
869 PRTR0MEMOBJLNX pMemLnx;
870 bool fLinearMapping;
871 int rc;
872 uint8_t *pbPage;
873 size_t iPage;
874 NOREF(fAccess);
875
876 /*
877 * Classify the memory and check that we can deal with it.
878 */
879#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
880 fLinearMapping = virt_addr_valid(pvLast) && virt_addr_valid(pv);
881#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
882 fLinearMapping = VALID_PAGE(virt_to_page(pvLast)) && VALID_PAGE(virt_to_page(pv));
883#else
884# error "not supported"
885#endif
886 if (!fLinearMapping)
887 {
888#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
889 if ( !RTR0MemKernelIsValidAddr(pv)
890 || !RTR0MemKernelIsValidAddr(pv + cb))
891#endif
892 return VERR_INVALID_PARAMETER;
893 }
894
895 /*
896 * Allocate the memory object.
897 */
898 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
899 if (!pMemLnx)
900 return VERR_NO_MEMORY;
901
902 /*
903 * Gather the pages.
904 * We ASSUME all kernel pages are non-swappable.
905 */
906 rc = VINF_SUCCESS;
907 pbPage = (uint8_t *)pvLast;
908 iPage = cPages;
909#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
910 if (!fLinearMapping)
911 {
912 while (iPage-- > 0)
913 {
914 struct page *pPage = vmalloc_to_page(pbPage);
915 if (RT_UNLIKELY(!pPage))
916 {
917 rc = VERR_LOCK_FAILED;
918 break;
919 }
920 pMemLnx->apPages[iPage] = pPage;
921 pbPage -= PAGE_SIZE;
922 }
923 }
924 else
925#endif
926 {
927 while (iPage-- > 0)
928 {
929 pMemLnx->apPages[iPage] = virt_to_page(pbPage);
930 pbPage -= PAGE_SIZE;
931 }
932 }
933 if (RT_SUCCESS(rc))
934 {
935 /*
936 * Complete the memory object and return.
937 */
938 pMemLnx->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
939 pMemLnx->cPages = cPages;
940 Assert(!pMemLnx->fMappedToRing0);
941 *ppMem = &pMemLnx->Core;
942
943 return VINF_SUCCESS;
944 }
945
946 rtR0MemObjDelete(&pMemLnx->Core);
947 return rc;
948}
949
950
951int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
952{
953#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
954 const size_t cPages = cb >> PAGE_SHIFT;
955 struct page *pDummyPage;
956 struct page **papPages;
957
958 /* check for unsupported stuff. */
959 AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
960 if (uAlignment > PAGE_SIZE)
961 return VERR_NOT_SUPPORTED;
962
963 /*
964 * Allocate a dummy page and create a page pointer array for vmap such that
965 * the dummy page is mapped all over the reserved area.
966 */
967 pDummyPage = alloc_page(GFP_HIGHUSER);
968 if (!pDummyPage)
969 return VERR_NO_MEMORY;
970 papPages = RTMemAlloc(sizeof(*papPages) * cPages);
971 if (papPages)
972 {
973 void *pv;
974 size_t iPage = cPages;
975 while (iPage-- > 0)
976 papPages[iPage] = pDummyPage;
977# ifdef VM_MAP
978 pv = vmap(papPages, cPages, VM_MAP, PAGE_KERNEL_RO);
979# else
980 pv = vmap(papPages, cPages, VM_ALLOC, PAGE_KERNEL_RO);
981# endif
982 RTMemFree(papPages);
983 if (pv)
984 {
985 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
986 if (pMemLnx)
987 {
988 pMemLnx->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
989 pMemLnx->cPages = 1;
990 pMemLnx->apPages[0] = pDummyPage;
991 *ppMem = &pMemLnx->Core;
992 return VINF_SUCCESS;
993 }
994 vunmap(pv);
995 }
996 }
997 __free_page(pDummyPage);
998 return VERR_NO_MEMORY;
999
1000#else /* < 2.4.22 */
1001 /*
1002 * Could probably use ioremap here, but the caller is in a better position than us
1003 * to select some safe physical memory.
1004 */
1005 return VERR_NOT_SUPPORTED;
1006#endif
1007}
1008
1009
1010/**
1011 * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativeMapUser that creates
1012 * an empty user space mapping.
1013 *
1014 * The caller takes care of acquiring the mmap_sem of the task.
1015 *
1016 * @returns Pointer to the mapping.
1017 * (void *)-1 on failure.
1018 * @param R3PtrFixed (RTR3PTR)-1 if anywhere, otherwise a specific location.
1019 * @param cb The size of the mapping.
1020 * @param uAlignment The alignment of the mapping.
1021 * @param pTask The Linux task to create this mapping in.
1022 * @param fProt The RTMEM_PROT_* mask.
1023 */
1024static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, struct task_struct *pTask, unsigned fProt)
1025{
1026 unsigned fLnxProt;
1027 unsigned long ulAddr;
1028
1029 /*
1030 * Convert from IPRT protection to mman.h PROT_ and call do_mmap.
1031 */
1032 fProt &= (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
1033 if (fProt == RTMEM_PROT_NONE)
1034 fLnxProt = PROT_NONE;
1035 else
1036 {
1037 fLnxProt = 0;
1038 if (fProt & RTMEM_PROT_READ)
1039 fLnxProt |= PROT_READ;
1040 if (fProt & RTMEM_PROT_WRITE)
1041 fLnxProt |= PROT_WRITE;
1042 if (fProt & RTMEM_PROT_EXEC)
1043 fLnxProt |= PROT_EXEC;
1044 }
1045
1046 if (R3PtrFixed != (RTR3PTR)-1)
1047 ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
1048 else
1049 {
1050 ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
1051 if ( !(ulAddr & ~PAGE_MASK)
1052 && (ulAddr & (uAlignment - 1)))
1053 {
1054 /** @todo implement uAlignment properly... We'll probably need to make some dummy mappings to fill
1055 * up alignment gaps. This is of course complicated by fragmentation (which we might have caused
1056 * ourselves) and further by there being two mmap strategies (top / bottom). */
1057 /* For now, just ignore uAlignment requirements... */
1058 }
1059 }
1060 if (ulAddr & ~PAGE_MASK) /* ~PAGE_MASK == PAGE_OFFSET_MASK */
1061 return (void *)-1;
1062 return (void *)ulAddr;
1063}
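
The PROT_* / MAP_SHARED|MAP_ANONYMOUS combination used here has a direct ring-3 counterpart; a minimal user-space sketch of the same kind of reservation (illustrative only, not how IPRT invokes it):

#define _DEFAULT_SOURCE     /* for MAP_ANONYMOUS on stricter libcs */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t cb = 4 * 4096;

    /* Reserve address space with no access rights, like the RTMEM_PROT_NONE case. */
    void *pv = mmap(NULL, cb, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (pv == MAP_FAILED)
    {
        perror("mmap");
        return 1;
    }
    printf("reserved %zu bytes at %p\n", cb, pv);

    /* Later the protection can be upgraded page by page. */
    if (mprotect(pv, 4096, PROT_READ | PROT_WRITE) != 0)
        perror("mprotect");

    munmap(pv, cb);
    return 0;
}
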
1064
1065
1066int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
1067{
1068 PRTR0MEMOBJLNX pMemLnx;
1069 void *pv;
1070 struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
1071 if (!pTask)
1072 return VERR_NOT_SUPPORTED;
1073
1074 /*
1075 * Check that the specified alignment is supported.
1076 */
1077 if (uAlignment > PAGE_SIZE)
1078 return VERR_NOT_SUPPORTED;
1079
1080 /*
1081 * Let rtR0MemObjLinuxDoMmap do the difficult bits.
1082 */
1083 down_write(&pTask->mm->mmap_sem);
1084 pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, cb, uAlignment, pTask, RTMEM_PROT_NONE);
1085 up_write(&pTask->mm->mmap_sem);
1086 if (pv == (void *)-1)
1087 return VERR_NO_MEMORY;
1088
1089 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
1090 if (!pMemLnx)
1091 {
1092 down_write(&pTask->mm->mmap_sem);
1093 MY_DO_MUNMAP(pTask->mm, (unsigned long)pv, cb);
1094 up_write(&pTask->mm->mmap_sem);
1095 return VERR_NO_MEMORY;
1096 }
1097
1098 pMemLnx->Core.u.ResVirt.R0Process = R0Process;
1099 *ppMem = &pMemLnx->Core;
1100 return VINF_SUCCESS;
1101}
1102
1103
1104int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
1105 unsigned fProt, size_t offSub, size_t cbSub)
1106{
1107 int rc = VERR_NO_MEMORY;
1108 PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
1109 PRTR0MEMOBJLNX pMemLnx;
1110
1111 /* Fail if requested to do something we can't. */
1112 AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
1113 AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
1114 if (uAlignment > PAGE_SIZE)
1115 return VERR_NOT_SUPPORTED;
1116
1117 /*
1118 * Create the IPRT memory object.
1119 */
1120 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
1121 if (pMemLnx)
1122 {
1123 if (pMemLnxToMap->cPages)
1124 {
1125#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
1126 /*
1127 * Use vmap - 2.4.22 and later.
1128 */
1129 pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
1130# ifdef VM_MAP
1131 pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_MAP, fPg);
1132# else
1133 pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_ALLOC, fPg);
1134# endif
1135 if (pMemLnx->Core.pv)
1136 {
1137 pMemLnx->fMappedToRing0 = true;
1138 rc = VINF_SUCCESS;
1139 }
1140 else
1141 rc = VERR_MAP_FAILED;
1142
1143#else /* < 2.4.22 */
1144 /*
1145 * The only option here is to share mappings if possible and forget about fProt.
1146 */
1147 if (rtR0MemObjIsRing3(pMemToMap))
1148 rc = VERR_NOT_SUPPORTED;
1149 else
1150 {
1151 rc = VINF_SUCCESS;
1152 if (!pMemLnxToMap->Core.pv)
1153 rc = rtR0MemObjLinuxVMap(pMemLnxToMap, !!(fProt & RTMEM_PROT_EXEC));
1154 if (RT_SUCCESS(rc))
1155 {
1156 Assert(pMemLnxToMap->Core.pv);
1157 pMemLnx->Core.pv = pMemLnxToMap->Core.pv;
1158 }
1159 }
1160#endif
1161 }
1162 else
1163 {
1164 /*
1165 * MMIO / physical memory.
1166 */
1167 Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
1168 pMemLnx->Core.pv = pMemLnxToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO
1169 ? ioremap_nocache(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb)
1170 : ioremap(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb);
1171 if (pMemLnx->Core.pv)
1172 {
1173 /** @todo fix protection. */
1174 rc = VINF_SUCCESS;
1175 }
1176 }
1177 if (RT_SUCCESS(rc))
1178 {
1179 pMemLnx->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1180 *ppMem = &pMemLnx->Core;
1181 return VINF_SUCCESS;
1182 }
1183 rtR0MemObjDelete(&pMemLnx->Core);
1184 }
1185
1186 return rc;
1187}
1188
1189
1190#ifdef VBOX_USE_PAE_HACK
1191/**
1192 * Replace the PFN of a PTE with the address of the actual page.
1193 *
1194 * The caller maps a reserved dummy page at the address with the desired access
1195 * and flags.
1196 *
1197 * This hack is required for older Linux kernels which don't provide
1198 * remap_pfn_range().
1199 *
1200 * @returns 0 on success, -ENOMEM on failure.
1201 * @param mm The memory context.
1202 * @param ulAddr The mapping address.
1203 * @param Phys The physical address of the page to map.
1204 */
1205static int rtR0MemObjLinuxFixPte(struct mm_struct *mm, unsigned long ulAddr, RTHCPHYS Phys)
1206{
1207 int rc = -ENOMEM;
1208 pgd_t *pgd;
1209
1210 spin_lock(&mm->page_table_lock);
1211
1212 pgd = pgd_offset(mm, ulAddr);
1213 if (!pgd_none(*pgd) && !pgd_bad(*pgd))
1214 {
1215 pmd_t *pmd = pmd_offset(pgd, ulAddr);
1216 if (!pmd_none(*pmd))
1217 {
1218 pte_t *ptep = pte_offset_map(pmd, ulAddr);
1219 if (ptep)
1220 {
1221 pte_t pte = *ptep;
1222 pte.pte_high &= 0xfff00000;
1223 pte.pte_high |= ((Phys >> 32) & 0x000fffff);
1224 pte.pte_low &= 0x00000fff;
1225 pte.pte_low |= (Phys & 0xfffff000);
1226 set_pte(ptep, pte);
1227 pte_unmap(ptep);
1228 rc = 0;
1229 }
1230 }
1231 }
1232
1233 spin_unlock(&mm->page_table_lock);
1234 return rc;
1235}
1236#endif /* VBOX_USE_PAE_HACK */
1237
1238
1239int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
1240{
1241 struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
1242 PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
1243 int rc = VERR_NO_MEMORY;
1244 PRTR0MEMOBJLNX pMemLnx;
1245#ifdef VBOX_USE_PAE_HACK
1246 struct page *pDummyPage;
1247 RTHCPHYS DummyPhys;
1248#endif
1249
1250 /*
1251 * Check for restrictions.
1252 */
1253 if (!pTask)
1254 return VERR_NOT_SUPPORTED;
1255 if (uAlignment > PAGE_SIZE)
1256 return VERR_NOT_SUPPORTED;
1257
1258#ifdef VBOX_USE_PAE_HACK
1259 /*
1260 * Allocate a dummy page for use when mapping the memory.
1261 */
1262 pDummyPage = alloc_page(GFP_USER);
1263 if (!pDummyPage)
1264 return VERR_NO_MEMORY;
1265 SetPageReserved(pDummyPage);
1266 DummyPhys = page_to_phys(pDummyPage);
1267#endif
1268
1269 /*
1270 * Create the IPRT memory object.
1271 */
1272 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
1273 if (pMemLnx)
1274 {
1275 /*
1276 * Allocate user space mapping.
1277 */
1278 void *pv;
1279 down_write(&pTask->mm->mmap_sem);
1280 pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, pMemLnxToMap->Core.cb, uAlignment, pTask, fProt);
1281 if (pv != (void *)-1)
1282 {
1283 /*
1284 * Map page by page into the mmap area.
1285 * This is generic, paranoid and not very efficient.
1286 */
1287 pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, false /* user */);
1288 unsigned long ulAddrCur = (unsigned long)pv;
1289 const size_t cPages = pMemLnxToMap->Core.cb >> PAGE_SHIFT;
1290 size_t iPage;
1291
1292 rc = 0;
1293 if (pMemLnxToMap->cPages)
1294 {
1295 for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
1296 {
1297#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
1298 RTHCPHYS Phys = page_to_phys(pMemLnxToMap->apPages[iPage]);
1299#endif
1300#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1301 struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
1302 AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
1303#endif
1304#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
1305 /* remap_page_range() limitation on x86 */
1306 AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
1307#endif
1308
1309#if defined(VBOX_USE_INSERT_PAGE) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1310 rc = vm_insert_page(vma, ulAddrCur, pMemLnxToMap->apPages[iPage]);
1311 vma->vm_flags |= VM_RESERVED; /* This flag helps making 100% sure some bad stuff wont happen (swap, core, ++). */
1312#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
1313 rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
1314#elif defined(VBOX_USE_PAE_HACK)
1315 rc = remap_page_range(vma, ulAddrCur, DummyPhys, PAGE_SIZE, fPg);
1316 if (!rc)
1317 rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
1318#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1319 rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
1320#else /* 2.4 */
1321 rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
1322#endif
1323 if (rc)
1324 {
1325 rc = VERR_NO_MEMORY;
1326 break;
1327 }
1328 }
1329 }
1330 else
1331 {
1332 RTHCPHYS Phys;
1333 if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS)
1334 Phys = pMemLnxToMap->Core.u.Phys.PhysBase;
1335 else if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_CONT)
1336 Phys = pMemLnxToMap->Core.u.Cont.Phys;
1337 else
1338 {
1339 AssertMsgFailed(("%d\n", pMemLnxToMap->Core.enmType));
1340 Phys = NIL_RTHCPHYS;
1341 }
1342 if (Phys != NIL_RTHCPHYS)
1343 {
1344 for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE, Phys += PAGE_SIZE)
1345 {
1346#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1347 struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
1348 AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
1349#endif
1350#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
1351 /* remap_page_range() limitation on x86 */
1352 AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
1353#endif
1354
1355#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
1356 rc = remap_pfn_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
1357#elif defined(VBOX_USE_PAE_HACK)
1358 rc = remap_page_range(vma, ulAddrCur, DummyPhys, PAGE_SIZE, fPg);
1359 if (!rc)
1360 rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
1361#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1362 rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
1363#else /* 2.4 */
1364 rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
1365#endif
1366 if (rc)
1367 {
1368 rc = VERR_NO_MEMORY;
1369 break;
1370 }
1371 }
1372 }
1373 }
1374 if (!rc)
1375 {
1376 up_write(&pTask->mm->mmap_sem);
1377#ifdef VBOX_USE_PAE_HACK
1378 __free_page(pDummyPage);
1379#endif
1380
1381 pMemLnx->Core.pv = pv;
1382 pMemLnx->Core.u.Mapping.R0Process = R0Process;
1383 *ppMem = &pMemLnx->Core;
1384 return VINF_SUCCESS;
1385 }
1386
1387 /*
1388 * Bail out.
1389 */
1390 MY_DO_MUNMAP(pTask->mm, (unsigned long)pv, pMemLnxToMap->Core.cb);
1391 }
1392 up_write(&pTask->mm->mmap_sem);
1393 rtR0MemObjDelete(&pMemLnx->Core);
1394 }
1395#ifdef VBOX_USE_PAE_HACK
1396 __free_page(pDummyPage);
1397#endif
1398
1399 return rc;
1400}
1401
1402
1403int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1404{
1405 NOREF(pMem);
1406 NOREF(offSub);
1407 NOREF(cbSub);
1408 NOREF(fProt);
1409 return VERR_NOT_SUPPORTED;
1410}
1411
1412
1413RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1414{
1415 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
1416
1417 if (pMemLnx->cPages)
1418 return page_to_phys(pMemLnx->apPages[iPage]);
1419
1420 switch (pMemLnx->Core.enmType)
1421 {
1422 case RTR0MEMOBJTYPE_CONT:
1423 return pMemLnx->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
1424
1425 case RTR0MEMOBJTYPE_PHYS:
1426 return pMemLnx->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
1427
1428 /* the parent knows */
1429 case RTR0MEMOBJTYPE_MAPPING:
1430 return rtR0MemObjNativeGetPagePhysAddr(pMemLnx->Core.uRel.Child.pParent, iPage);
1431
1432 /* cPages > 0 */
1433 case RTR0MEMOBJTYPE_LOW:
1434 case RTR0MEMOBJTYPE_LOCK:
1435 case RTR0MEMOBJTYPE_PHYS_NC:
1436 case RTR0MEMOBJTYPE_PAGE:
1437 default:
1438 AssertMsgFailed(("%d\n", pMemLnx->Core.enmType));
1439 /* fall thru */
1440
1441 case RTR0MEMOBJTYPE_RES_VIRT:
1442 return NIL_RTHCPHYS;
1443 }
1444}
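
For example, with 4KB pages (PAGE_SHIFT = 12) a contiguous object whose Cont.Phys happens to be 0x12340000 resolves page index 3 to 0x12340000 + (3 << 12) = 0x12343000.
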
1445