Changeset 85504 in vbox for trunk/src/VBox/Runtime/r0drv
- Timestamp:
- Jul 29, 2020 10:02:13 AM (4 years ago)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
r85430 r85504 93 93 *********************************************************************************************************************************/ 94 94 /** 95 * The Darwinversion of the memory object structure.95 * The Linux version of the memory object structure. 96 96 */ 97 97 typedef struct RTR0MEMOBJLNX … … 106 106 /** Set if we've vmap'ed the memory into ring-0. */ 107 107 bool fMappedToRing0; 108 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) 109 /** Return from alloc_vm_area() that we now need to use for executable 110 * memory. */ 111 struct vm_struct *pArea; 112 /** PTE array that goes along with pArea (must be freed). */ 113 pte_t **papPtesForArea; 114 #endif 108 115 /** The pages in the apPages array. */ 109 116 size_t cPages; 110 117 /** Array of struct page pointers. (variable size) */ 111 118 struct page *apPages[1]; 112 } RTR0MEMOBJLNX, *PRTR0MEMOBJLNX; 119 } RTR0MEMOBJLNX; 120 /** Pointer to the linux memory object. */ 121 typedef RTR0MEMOBJLNX *PRTR0MEMOBJLNX; 113 122 114 123 … … 536 545 # endif 537 546 547 # if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) 548 if (fExecutable) 549 { 550 pte_t **papPtes = (pte_t **)kmalloc_array(pMemLnx->cPages, sizeof(papPtes[0]), GFP_KERNEL); 551 if (papPtes) 552 { 553 pMemLnx->pArea = alloc_vm_area(pMemLnx->Core.cb, papPtes); /* Note! pArea->nr_pages is not set. */ 554 if (pMemLnx->pArea) 555 { 556 size_t i; 557 Assert(pMemLnx->pArea->size >= pMemLnx->Core.cb); /* Note! includes guard page. */ 558 Assert(pMemLnx->pArea->addr); 559 # ifdef _PAGE_NX 560 pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. 
*/ 561 # endif 562 pMemLnx->papPtesForArea = papPtes; 563 for (i = 0; i < pMemLnx->cPages; i++) 564 *papPtes[i] = mk_pte(pMemLnx->apPages[i], fPg); 565 pMemLnx->Core.pv = pMemLnx->pArea->addr; 566 pMemLnx->fMappedToRing0 = true; 567 } 568 else 569 { 570 kfree(papPtes); 571 rc = VERR_MAP_FAILED; 572 } 573 } 574 else 575 rc = VERR_MAP_FAILED; 576 } 577 else 578 # endif 579 { 538 580 # ifdef VM_MAP 539 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);581 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg); 540 582 # else 541 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);583 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg); 542 584 # endif 543 if (pMemLnx->Core.pv) 544 pMemLnx->fMappedToRing0 = true; 545 else 546 rc = VERR_MAP_FAILED; 585 if (pMemLnx->Core.pv) 586 pMemLnx->fMappedToRing0 = true; 587 else 588 rc = VERR_MAP_FAILED; 589 } 547 590 #else /* < 2.4.22 */ 548 591 rc = VERR_NOT_SUPPORTED; … … 570 613 { 571 614 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22) 615 # if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) 616 if (pMemLnx->pArea) 617 { 618 # if 0 619 pte_t **papPtes = pMemLnx->papPtesForArea; 620 size_t i; 621 for (i = 0; i < pMemLnx->cPages; i++) 622 *papPtes[i] = 0; 623 # endif 624 free_vm_area(pMemLnx->pArea); 625 kfree(pMemLnx->papPtesForArea); 626 pMemLnx->pArea = NULL; 627 pMemLnx->papPtesForArea = NULL; 628 } 629 else 630 # endif 572 631 if (pMemLnx->fMappedToRing0) 573 632 { … … 1438 1497 */ 1439 1498 pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */); 1499 /** @todo We don't really care too much for EXEC here... 5.8 always adds NX. 
*/ 1440 1500 Assert(((offSub + cbSub) >> PAGE_SHIFT) <= pMemLnxToMap->cPages); 1441 1501 # ifdef VM_MAP … … 1769 1829 DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt) 1770 1830 { 1831 # if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) 1832 /* 1833 * Currently only supported when we've got addresses PTEs from the kernel. 1834 */ 1835 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem; 1836 if (pMemLnx->pArea && pMemLnx->papPtesForArea) 1837 { 1838 pgprot_t const fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/); 1839 size_t const cPages = (offSub + cbSub) >> PAGE_SHIFT; 1840 pte_t **papPtes = pMemLnx->papPtesForArea; 1841 size_t i; 1842 1843 for (i = offSub >> PAGE_SHIFT; i < cPages; i++) 1844 { 1845 set_pte(papPtes[i], mk_pte(pMemLnx->apPages[i], fPg)); 1846 } 1847 preempt_disable(); 1848 __flush_tlb_all(); 1849 preempt_enable(); 1850 return VINF_SUCCESS; 1851 } 1852 # endif 1853 1771 1854 NOREF(pMem); 1772 1855 NOREF(offSub);
Note: See TracChangeset for help on using the changeset viewer.