Changeset 85698 in vbox for trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
- Timestamp: Aug 11, 2020 5:05:29 PM
- svn:sync-xref-src-repo-rev: 139838
- File: 1 edited
trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
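The entire change is a mechanical substitution: every raw LINUX_VERSION_CODE / KERNEL_VERSION comparison is replaced by the IPRT wrappers RTLNX_VER_MIN (inclusive lower bound), RTLNX_VER_MAX (exclusive upper bound) and RTLNX_VER_RANGE (half-open interval), as the diff below shows hunk by hunk. The real definitions ship with IPRT; the stand-ins in this sketch (the MY_LNX_* names are illustrative, not IPRT's) merely mirror the semantics implied by the substitutions:

/* Illustrative stand-ins for the RTLNX_VER_* wrappers used throughout this
 * changeset: MIN is an inclusive lower bound, MAX an exclusive upper bound,
 * RANGE the half-open interval [min, max). The real macros live in IPRT. */
#include <linux/version.h>

#define MY_LNX_VER_MIN(a, b, c)  (LINUX_VERSION_CODE >= KERNEL_VERSION(a, b, c))
#define MY_LNX_VER_MAX(a, b, c)  (LINUX_VERSION_CODE <  KERNEL_VERSION(a, b, c))
#define MY_LNX_VER_RANGE(a1, b1, c1, a2, b2, c2) \
    (MY_LNX_VER_MIN(a1, b1, c1) && MY_LNX_VER_MAX(a2, b2, c2))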
--- trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c (r85516)
+++ trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c (r85698)
@@ -57,5 +57,5 @@
  * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
  * better W^R compliance (fExecutable flag). */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) || defined(DOXYGEN_RUNNING)
+#if RTLNX_VER_MIN(3,2,0) || defined(DOXYGEN_RUNNING)
 # define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
 #endif
@@ -66,16 +66,15 @@
  * It should be safe to use vm_insert_page() older kernels as well.
  */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+#if RTLNX_VER_MIN(2,6,23)
 # define VBOX_USE_INSERT_PAGE
 #endif
 #if defined(CONFIG_X86_PAE) \
  && (   defined(HAVE_26_STYLE_REMAP_PAGE_RANGE) \
-     || (   LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) \
-         && LINUX_VERSION_CODE <  KERNEL_VERSION(2, 6, 11)))
+     || RTLNX_VER_RANGE(2,6,0, 2,6,11) )
 # define VBOX_USE_PAE_HACK
 #endif
 
 /* gfp_t was introduced in 2.6.14, define it for earlier. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+#if RTLNX_VER_MAX(2,6,14)
 # define gfp_t unsigned
 #endif
@@ -84,5 +83,5 @@
  * Wrappers around mmap_lock/mmap_sem difference.
  */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+#if RTLNX_VER_MIN(5,8,0)
 # define LNX_MM_DOWN_READ(a_pMm)    down_read(&(a_pMm)->mmap_lock)
 # define LNX_MM_UP_READ(a_pMm)      up_read(&(a_pMm)->mmap_lock)
@@ -252,5 +251,5 @@
     if (R3PtrFixed != (RTR3PTR)-1)
     {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+#if RTLNX_VER_MIN(3,5,0)
         ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
 #else
@@ -262,5 +261,5 @@
     else
     {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+#if RTLNX_VER_MIN(3,5,0)
         ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
 #else
@@ -298,5 +297,5 @@
 static void rtR0MemObjLinuxDoMunmap(void *pv, size_t cb, struct task_struct *pTask)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+#if RTLNX_VER_MIN(3,5,0)
     Assert(pTask == current); RT_NOREF_PV(pTask);
     vm_munmap((unsigned long)pv, cb);
@@ -359,5 +358,5 @@
      * For small allocations we'll try contiguous first and then fall back on page by page.
      */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+#if RTLNX_VER_MIN(2,4,22)
     if (   fContiguous
         || cb <= PAGE_SIZE * 2)
@@ -418,5 +417,5 @@
     pMemLnx->fExecutable = fExecutable;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+#if RTLNX_VER_MAX(4,5,0)
     /*
      * Reserve the pages.
@@ -475,9 +474,9 @@
         while (iPage-- > 0)
         {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+#if RTLNX_VER_MAX(4,5,0)
             /* See SetPageReserved() in rtR0MemObjLinuxAllocPages() */
             ClearPageReserved(pMemLnx->apPages[iPage]);
 #endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)
+#if RTLNX_VER_MAX(2,4,22)
             if (pMemLnx->fExecutable)
                 MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
@@ -488,5 +487,5 @@
          * Free the pages.
          */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+#if RTLNX_VER_MIN(2,4,22)
         if (!pMemLnx->fContiguous)
        {
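The mmap_lock hunk above (lines 84-88) shows only the 5.8+ side of the wrapper pair. For context, a minimal sketch of the complete pattern; the pre-5.8 branch is an assumption that mirrors the old mmap_sem field name, which kernel 5.8 renamed to mmap_lock:

#include <linux/version.h>
#include <linux/mm_types.h>   /* struct mm_struct */
#include <linux/rwsem.h>      /* down_read() / up_read() */

/* One wrapper pair keeps every call site version-agnostic across the
 * 5.8 mmap_sem -> mmap_lock rename. Only the 5.8+ branch is visible in
 * the diff; the #else branch here is an assumption. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
# define LNX_MM_DOWN_READ(a_pMm)    down_read(&(a_pMm)->mmap_lock)
# define LNX_MM_UP_READ(a_pMm)      up_read(&(a_pMm)->mmap_lock)
#else
# define LNX_MM_DOWN_READ(a_pMm)    down_read(&(a_pMm)->mmap_sem)
# define LNX_MM_UP_READ(a_pMm)      up_read(&(a_pMm)->mmap_sem)
#endif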
@@ -545,5 +544,5 @@
      * Use vmap - 2.4.22 and later.
      */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+#if RTLNX_VER_MIN(2,4,22)
     pgprot_t fPg;
     pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;
@@ -620,5 +619,5 @@
 static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+#if RTLNX_VER_MIN(2,4,22)
 # ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
     if (pMemLnx->pArea)
@@ -683,5 +682,5 @@
             if (!PageReserved(pMemLnx->apPages[iPage]))
                 SetPageDirty(pMemLnx->apPages[iPage]);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#if RTLNX_VER_MIN(4,6,0)
             put_page(pMemLnx->apPages[iPage]);
 #else
@@ -746,5 +745,5 @@
     int rc;
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+#if RTLNX_VER_MIN(2,4,22)
     rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_HIGHUSER,
                                    false /* non-contiguous */, fExecutable, VERR_NO_MEMORY);
@@ -995,8 +994,8 @@
 {
     pgd_t Global;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
+#if RTLNX_VER_MIN(4,12,0)
     p4d_t Four;
 #endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#if RTLNX_VER_MIN(2,6,11)
     pud_t Upper;
 #endif
@@ -1013,6 +1012,6 @@
     if (RT_UNLIKELY(pgd_none(u.Global)))
         return NULL;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
+#if RTLNX_VER_MIN(2,6,11)
+# if RTLNX_VER_MIN(4,12,0)
     u.Four = *p4d_offset(&u.Global, ulAddr);
     if (RT_UNLIKELY(p4d_none(u.Four)))
@@ -1033,5 +1032,5 @@
         if (RT_UNLIKELY(pud_none(u.Upper)))
             return NULL;
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+# if RTLNX_VER_MIN(2,6,25)
         if (pud_large(u.Upper))
         {
@@ -1049,5 +1048,5 @@
     if (RT_UNLIKELY(pmd_none(u.Middle)))
         return NULL;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if RTLNX_VER_MIN(2,6,0)
     if (pmd_large(u.Middle))
     {
@@ -1060,5 +1059,5 @@
 #endif
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) || defined(pte_offset_map) /* As usual, RHEL 3 had pte_offset_map earlier. */
+#if RTLNX_VER_MIN(2,5,5) || defined(pte_offset_map) /* As usual, RHEL 3 had pte_offset_map earlier. */
     pEntry = pte_offset_map(&u.Middle, ulAddr);
 #else
@@ -1068,5 +1067,5 @@
         return NULL;
     u.Entry = *pEntry;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) || defined(pte_offset_map)
+#if RTLNX_VER_MIN(2,5,5) || defined(pte_offset_map)
     pte_unmap(pEntry);
 #endif
@@ -1120,7 +1119,5 @@
 
 /* openSUSE Leap 42.3 detection :-/ */
-#if    LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) \
-    && LINUX_VERSION_CODE <  KERNEL_VERSION(4, 6, 0) \
-    && defined(FAULT_FLAG_REMOTE)
+#if RTLNX_VER_RANGE(4,4,0, 4,6,0) && defined(FAULT_FLAG_REMOTE)
 # define GET_USER_PAGES_API KERNEL_VERSION(4, 10, 0) /* no typo! */
 #else
@@ -1134,5 +1131,5 @@
     struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
     struct vm_area_struct **papVMAs;
-    PRTR0MEMOBJLNX pMemLnx;
+    PRTR0MEMOBJLNX  pMemLnx;
     int rc = VERR_NO_MEMORY;
     int const fWrite = fAccess & RTMEM_PROT_WRITE ? 1 : 0;
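The rtR0MemObjLinuxVirtToPage hunks above gate each page-table level on the kernel that introduced it (p4d in 4.12, pud in 2.6.11). For orientation, a condensed sketch of the same five-level walk, assuming a 4.12+ kernel only and using real kernel APIs, while omitting the huge-page (pud_large/pmd_large) and pre-4.12 paths the real function keeps behind its version gates:

#include <linux/mm.h>

/* Walk pgd -> p4d -> pud -> pmd -> pte for one virtual address on a
 * 4.12+ kernel. Illustrative only: no huge-page handling, no older
 * page-table layouts. */
static pte_t *my_walk_to_pte(struct mm_struct *pMm, unsigned long ulAddr)
{
    pgd_t *pPgd;
    p4d_t *pP4d;
    pud_t *pPud;
    pmd_t *pPmd;

    pPgd = pgd_offset(pMm, ulAddr);
    if (pgd_none(*pPgd))
        return NULL;
    pP4d = p4d_offset(pPgd, ulAddr);            /* level added in 4.12 */
    if (p4d_none(*pP4d))
        return NULL;
    pPud = pud_offset(pP4d, ulAddr);            /* level added in 2.6.11 */
    if (pud_none(*pPud))
        return NULL;
    pPmd = pmd_offset(pPud, ulAddr);
    if (pmd_none(*pPmd))
        return NULL;
    return pte_offset_map(pPmd, ulAddr);        /* caller must pte_unmap() */
}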
@@ -1209,6 +1206,5 @@
                                 cPages,                 /* How many pages. */
 /* The get_user_pages API change was back-ported to 4.4.168. */
-# if    LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) \
-     && LINUX_VERSION_CODE <  KERNEL_VERSION(4, 5, 0)
+# if RTLNX_VER_RANGE(4,4,168, 4,5,0)
                                 fWrite ? FOLL_WRITE |   /* Write to memory. */
                                          FOLL_FORCE     /* force write access. */
@@ -1265,5 +1261,5 @@
             if (!PageReserved(pMemLnx->apPages[rc]))
                 SetPageDirty(pMemLnx->apPages[rc]);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#if RTLNX_VER_MIN(4,6,0)
             put_page(pMemLnx->apPages[rc]);
 #else
@@ -1375,5 +1371,5 @@
 DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+#if RTLNX_VER_MIN(2,4,22)
     IPRT_LINUX_SAVE_EFL_AC();
     const size_t cPages = cb >> PAGE_SHIFT;
@@ -1500,5 +1496,5 @@
     if (pMemLnxToMap->cPages)
     {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+#if RTLNX_VER_MIN(2,4,22)
         /*
          * Use vmap - 2.4.22 and later.
@@ -1545,5 +1541,5 @@
          */
         Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+#if RTLNX_VER_MIN(2,6,25)
         /*
         * ioremap() defaults to no caching since the 2.6 kernels.
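The remaining hunks below rewrite the gates around the user-space mapping loops, which pick between vm_insert_page() and remap_pfn_range() depending on kernel age. A minimal sketch of the preferred modern path, assuming a 3.7+ kernel from this code's era where vma->vm_flags is still directly assignable:

#include <linux/mm.h>

/* Map one allocated page into a user VMA and pin the mapping down.
 * Mirrors the VBOX_USE_INSERT_PAGE branch below; illustrative only. */
static int my_map_one_page(struct vm_area_struct *pVma, unsigned long ulAddr,
                           struct page *pPage)
{
    int rc = vm_insert_page(pVma, ulAddr, pPage);
    if (!rc)
        /* Keep the mapping out of core dumps and mremap() expansion;
         * this is the 3.7+ spelling of the old VM_RESERVED intent. */
        pVma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
    return rc;
}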
@@ -1695,26 +1691,26 @@
         for (iPage = offSub >> PAGE_SHIFT; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
         {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+#if RTLNX_VER_MAX(2,6,11)
             RTHCPHYS Phys = page_to_phys(pMemLnxToMap->apPages[iPage]);
 #endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
+#if RTLNX_VER_MIN(2,6,0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
             struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
             AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
 #endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
+#if RTLNX_VER_MAX(2,6,0) && defined(RT_ARCH_X86)
             /* remap_page_range() limitation on x86 */
             AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
 #endif
 
-#if defined(VBOX_USE_INSERT_PAGE) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#if defined(VBOX_USE_INSERT_PAGE) && RTLNX_VER_MIN(2,6,22)
             rc = vm_insert_page(vma, ulAddrCur, pMemLnxToMap->apPages[iPage]);
             /* Thes flags help making 100% sure some bad stuff wont happen (swap, core, ++).
              * See remap_pfn_range() in mm/memory.c */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+#if RTLNX_VER_MIN(3,7,0)
             vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 #else
             vma->vm_flags |= VM_RESERVED;
 #endif
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#elif RTLNX_VER_MIN(2,6,11)
             rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
 #elif defined(VBOX_USE_PAE_HACK)
@@ -1722,5 +1718,5 @@
             if (!rc)
                 rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
+#elif RTLNX_VER_MIN(2,6,0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
             rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
 #else /* 2.4 */
@@ -1750,14 +1746,14 @@
         for (iPage = offSub >> PAGE_SHIFT; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE, Phys += PAGE_SIZE)
         {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
+#if RTLNX_VER_MIN(2,6,0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
             struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
             AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
 #endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && defined(RT_ARCH_X86)
+#if RTLNX_VER_MAX(2,6,0) && defined(RT_ARCH_X86)
             /* remap_page_range() limitation on x86 */
             AssertBreakStmt(Phys < _4G, rc = VERR_NO_MEMORY);
 #endif
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#if RTLNX_VER_MIN(2,6,11)
             rc = remap_pfn_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
 #elif defined(VBOX_USE_PAE_HACK)
@@ -1765,5 +1761,5 @@
             if (!rc)
                 rc = rtR0MemObjLinuxFixPte(pTask->mm, ulAddrCur, Phys);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
+#elif RTLNX_VER_MIN(2,6,0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
             rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
 #else /* 2.4 */
@@ -1780,5 +1776,5 @@
 
 #ifdef CONFIG_NUMA_BALANCING
-# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+# if RTLNX_VER_MAX(3,13,0)
 #  ifdef RHEL_RELEASE_CODE
 #   if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 0)
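As a quick sanity check of the wrapper semantics sketched at the top, a self-contained user-space harness; it fakes LINUX_VERSION_CODE instead of including kernel headers, so it compiles and runs anywhere, and the MY_LNX_* names again stand in for the real IPRT macros:

#include <stdio.h>

#define KERNEL_VERSION(a, b, c)  (((a) << 16) + ((b) << 8) + (c))
#define LINUX_VERSION_CODE       KERNEL_VERSION(4, 4, 168) /* pretend kernel */

#define MY_LNX_VER_MIN(a, b, c)  (LINUX_VERSION_CODE >= KERNEL_VERSION(a, b, c))
#define MY_LNX_VER_MAX(a, b, c)  (LINUX_VERSION_CODE <  KERNEL_VERSION(a, b, c))
#define MY_LNX_VER_RANGE(a1, b1, c1, a2, b2, c2) \
    (MY_LNX_VER_MIN(a1, b1, c1) && MY_LNX_VER_MAX(a2, b2, c2))

int main(void)
{
    /* Matches the back-ported get_user_pages gate in the diff above. */
    printf("RANGE(4.4.168, 4.5.0): %d\n", MY_LNX_VER_RANGE(4,4,168, 4,5,0)); /* 1 */
    printf("MIN(4.6.0):            %d\n", MY_LNX_VER_MIN(4,6,0));            /* 0 */
    printf("MAX(4.5.0):            %d\n", MY_LNX_VER_MAX(4,5,0));            /* 1 */
    return 0;
}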