VirtualBox

Timestamp: Nov 6, 2021 3:10:49 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 148086
Message:

IPRT/RTR0MemObj: Added RTR0MemObjWasZeroInitialized and a couple of flags with which the backend can feed it the necessary info. It would be good to try to avoid zeroing memory twice when we can. bugref:10093
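
The NetBSD backend below only feeds the new information to the generic code; the intended consumer is a caller that currently zeroes freshly allocated pages unconditionally. A minimal caller-side sketch of the idea (assuming RTR0MemObjWasZeroInitialized() takes the memory-object handle and returns a bool; the exact prototype is not shown in this changeset):

    /* Sketch: zero the allocation only when the backend did not hand back
     * already-zeroed pages (hypothetical usage of the new API). */
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, cb, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMemObj);
        if (!RTR0MemObjWasZeroInitialized(hMemObj))
            RT_BZERO(pv, cb);
        /* ... use the memory ... */
    }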

File: 1 edited

Legend: lines prefixed with '-' were removed in r92246, lines prefixed with '+' were added in r92246, and unprefixed lines are unchanged context.
  • trunk/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c

--- trunk/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c (r91483)
+++ trunk/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c (r92246)
@@ -189,9 +189,8 @@
 
 static int rtR0MemObjNetBSDAllocHelper(PRTR0MEMOBJNETBSD pMemNetBSD, size_t cb, bool fExecutable,
-                                         paddr_t VmPhysAddrHigh, bool fContiguous)
+                                       paddr_t VmPhysAddrHigh, bool fContiguous)
 {
     /* Virtual space first */
-    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0,
-            UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
     if (virt == 0)
         return VERR_NO_MEMORY;
     
@@ -202,9 +201,8 @@
 
     /* Physical pages */
-    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh,
-            PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
+    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh, PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
     {
         uvm_km_free(kernel_map, virt, cb, UVM_KMF_VAONLY);
-        return VERR_NO_MEMORY;
+        return VERR_NO_MEMORY; /** @todo inaccurate status code */
     }
 
     
@@ -221,4 +219,5 @@
     }
 
+    pMemNetBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /*?*/
     pMemNetBSD->Core.pv = (void *)virt;
     if (fContiguous)
     
@@ -234,22 +233,21 @@
 {
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PAGE, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    void *pvMem = kmem_alloc(cb, KM_SLEEP);
-    if (RT_UNLIKELY(!pvMem))
-    {
+    if (pMemNetBSD)
+    {
+        void *pvMem = kmem_alloc(cb, KM_SLEEP);
+        if (pvMem)
+        {
+            if (fExecutable)
+                pmap_protect(pmap_kernel(), (vaddr_t)pvMem, (vaddr_t)pvMem + cb, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
+
+            pMemNetBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
+            pMemNetBSD->Core.pv      = pvMem;
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
         return VERR_NO_PAGE_MEMORY;
     }
-    if (fExecutable)
-    {
-        pmap_protect(pmap_kernel(), (vaddr_t)pvMem, ((vaddr_t)pvMem) + cb,
-                VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
-    }
-
-    pMemNetBSD->Core.pv = pvMem;
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
 
     
@@ -265,16 +263,16 @@
 {
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOW, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false);
-    if (rc)
-    {
+    if (pMemNetBSD)
+    {
+        int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false /*fContiguous*/);
+        if (RT_SUCCESS(rc))
+        {
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
         return rc;
     }
-
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
 
     
@@ -283,16 +281,16 @@
 {
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_CONT, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true);
-    if (rc)
-    {
+    if (pMemNetBSD)
+    {
+        int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true /*fContiguous*/);
+        if (RT_SUCCESS(rc))
+        {
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
         return rc;
     }
-
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
 
     
@@ -301,35 +299,28 @@
                                           RTHCPHYS PhysHighest, size_t uAlignment, bool fContiguous, const char *pszTag)
 {
-    paddr_t VmPhysAddrHigh;
-
     /* create the object. */
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), enmType, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    if (PhysHighest != NIL_RTHCPHYS)
-        VmPhysAddrHigh = PhysHighest;
-    else
-        VmPhysAddrHigh = ~(paddr_t)0;
-
-    int nsegs = fContiguous ? 1 : INT_MAX;
-
-    int error = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
-    if (error)
-    {
+    if (pMemNetBSD)
+    {
+        paddr_t const VmPhysAddrHigh = PhysHighest != NIL_RTHCPHYS ? PhysHighest : ~(paddr_t)0;
+        int const     nsegs          = fContiguous ? 1 : INT_MAX;
+        int rc = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
+        if (!rc)
+        {
+            pMemNetBSD->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /*?*/
+            if (fContiguous)
+            {
+                Assert(enmType == RTR0MEMOBJTYPE_PHYS);
+                const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
+                pMemNetBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pg);
+                pMemNetBSD->Core.u.Phys.fAllocated = true;
+            }
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
-        return VERR_NO_MEMORY;
-    }
-
-    if (fContiguous)
-    {
-        Assert(enmType == RTR0MEMOBJTYPE_PHYS);
-        const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
-        pMemNetBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pg);
-        pMemNetBSD->Core.u.Phys.fAllocated = true;
-    }
-    *ppMem = &pMemNetBSD->Core;
-
-    return VINF_SUCCESS;
+        return VERR_NO_PAGE_MEMORY;
+    }
+    return VERR_NO_MEMORY;
 }
 
     
@@ -355,14 +346,15 @@
     /* create the object. */
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    /* there is no allocation here, it needs to be mapped somewhere first. */
-    pMemNetBSD->Core.u.Phys.fAllocated = false;
-    pMemNetBSD->Core.u.Phys.PhysBase = Phys;
-    pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
-    TAILQ_INIT(&pMemNetBSD->pglist);
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    if (pMemNetBSD)
+    {
+        /* there is no allocation here, it needs to be mapped somewhere first. */
+        pMemNetBSD->Core.u.Phys.fAllocated = false;
+        pMemNetBSD->Core.u.Phys.PhysBase = Phys;
+        pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
+        TAILQ_INIT(&pMemNetBSD->pglist);
+        *ppMem = &pMemNetBSD->Core;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
 }
 
     
@@ -373,21 +365,18 @@
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK,
                                                                     (void *)R3Ptr, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    int rc = uvm_map_pageable(
-            &((struct proc *)R0Process)->p_vmspace->vm_map,
-            R3Ptr,
-            R3Ptr + cb,
-            0, 0);
-    if (rc)
-    {
+    if (pMemNetBSD)
+    {
+        int rc = uvm_map_pageable(&((struct proc *)R0Process)->p_vmspace->vm_map, R3Ptr, R3Ptr + cb,
+                                  0 /*new_pageable*/, 0 /*lockflags*/);
+        if (!rc)
+        {
+            pMemNetBSD->Core.u.Lock.R0Process = R0Process;
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
-        return VERR_NO_MEMORY;
-    }
-
-    pMemNetBSD->Core.u.Lock.R0Process = R0Process;
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+        return VERR_LOCK_FAILED;
+    }
+    return VERR_NO_MEMORY;
 }
 
     
@@ -397,11 +386,12 @@
     /* Kernel memory (always?) wired; all memory allocated by vbox code is? */
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
-    pMemNetBSD->Core.pv = pv;
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    if (pMemNetBSD)
+    {
+        pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
+        pMemNetBSD->Core.pv = pv;
+        *ppMem = &pMemNetBSD->Core;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
 }
 
     
@@ -418,19 +408,18 @@
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_RES_VIRT,
                                                                     NULL, cb, pszTag);
-    if (!pMemNetBSD)
-        return VERR_NO_MEMORY;
-
-    vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment,
-            UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
-    if (virt == 0)
-    {
+    if (pMemNetBSD)
+    {
+        vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment, UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+        if (virt != 0)
+        {
+            pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
+            pMemNetBSD->Core.pv = (void *)virt;
+            *ppMem = &pMemNetBSD->Core;
+            return VINF_SUCCESS;
+        }
         rtR0MemObjDelete(&pMemNetBSD->Core);
         return VERR_NO_MEMORY;
     }
-
-    pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
-    pMemNetBSD->Core.pv = (void *)virt;
-    *ppMem = &pMemNetBSD->Core;
-    return VINF_SUCCESS;
+    return VERR_NO_MEMORY;
 }
 
     
@@ -466,42 +455,39 @@
     PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_MAPPING, NULL, sz, pszTag);
 
-    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment,
-            UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
-    if (virt == 0)
-    {
-        rtR0MemObjDelete(&pMemNetBSD->Core);
-        return VERR_NO_MEMORY;
-    }
-
-    vm_prot_t prot = 0;
-
-    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
-        prot |= VM_PROT_READ;
-    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
-        prot |= VM_PROT_WRITE;
-    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
-        prot |= VM_PROT_EXECUTE;
-
-    struct vm_page *page;
-    vaddr_t virt2 = virt;
-    size_t map_pos = 0;
-    TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
-    {
-        if (map_pos >= offSub)
-        {
-            if (cbSub > 0 && (map_pos >= offSub + cbSub))
-                break;
-
-            pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
-            virt2 += PAGE_SIZE;
-        }
-        map_pos += PAGE_SIZE;
-    }
-
-    pMemNetBSD->Core.pv = (void *)virt;
-    pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
-    *ppMem = &pMemNetBSD->Core;
-
-    return VINF_SUCCESS;
+    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment, UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
+    if (virt != 0)
+    {
+        vm_prot_t prot = 0;
+        if (fProt & RTMEM_PROT_READ)
+            prot |= VM_PROT_READ;
+        if (fProt & RTMEM_PROT_WRITE)
+            prot |= VM_PROT_WRITE;
+        if (fProt & RTMEM_PROT_EXEC)
+            prot |= VM_PROT_EXECUTE;
+
+        struct vm_page *page;
+        vaddr_t virt2 = virt;
+        size_t map_pos = 0;
+        TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
+        {
+            if (map_pos >= offSub)
+            {
+                if (cbSub > 0 && (map_pos >= offSub + cbSub))
+                    break;
+
+                pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
+                virt2 += PAGE_SIZE;
+            }
+            map_pos += PAGE_SIZE;
+        }
+
+        pMemNetBSD->Core.pv = (void *)virt;
+        pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
+        *ppMem = &pMemNetBSD->Core;
+        return VINF_SUCCESS;
+    }
+
+    rtR0MemObjDelete(&pMemNetBSD->Core);
+    return VERR_NO_MEMORY;
 }
 
     
@@ -518,23 +504,21 @@
 DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
 {
-    vm_prot_t          ProtectionFlags = 0;
-    vaddr_t            AddrStart       = (vaddr_t)pMem->pv + offSub;
-    vm_map_t           pVmMap          = rtR0MemObjNetBSDGetMap(pMem);
-
-    if (!pVmMap)
-        return VERR_NOT_SUPPORTED;
-
-    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
-        ProtectionFlags |= UVM_PROT_R;
-    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
-        ProtectionFlags |= UVM_PROT_W;
-    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
-        ProtectionFlags |= UVM_PROT_X;
-
-    int error = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub,
-        ProtectionFlags, 0);
-    if (!error)
-        return VINF_SUCCESS;
-
+    vm_map_t const pVmMap = rtR0MemObjNetBSDGetMap(pMem);
+    if (pVmMap)
+    {
+        vaddr_t const   AddrStart       = (vaddr_t)pMem->pv + offSub;
+        vm_prot_t       ProtectionFlags = 0;
+        if (fProt & RTMEM_PROT_READ)
+            ProtectionFlags |= UVM_PROT_R;
+        if (fProt & RTMEM_PROT_WRITE)
+            ProtectionFlags |= UVM_PROT_W;
+        if (fProt & RTMEM_PROT_EXEC)
+            ProtectionFlags |= UVM_PROT_X;
+
+        int rc = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub, ProtectionFlags, 0);
+        if (!rc)
+            return VINF_SUCCESS;
+        return RTErrConvertFromErrno(rc);
+    }
    return VERR_NOT_SUPPORTED;
 }
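
For context, the generic r0drv code presumably answers the new RTR0MemObjWasZeroInitialized() query from these per-object flags. A rough sketch under that assumption (the counterpart "zero initialized" flag is not visible in this changeset, so its name below is a guess):

    /* Guesswork sketch of the query side. RTR0MEMOBJ_FLAGS_ZERO_INITIALIZED is an
     * assumed name for the flag a backend would set when it knows the memory is
     * already zeroed, i.e. the counterpart of the
     * RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC flag set by the NetBSD backend above. */
    RTR0DECL(bool) RTR0MemObjWasZeroInitialized(RTR0MEMOBJ hMemObj)
    {
        PRTR0MEMOBJINTERNAL pMem = (PRTR0MEMOBJINTERNAL)hMemObj;
        return RT_BOOL(pMem->fFlags & RTR0MEMOBJ_FLAGS_ZERO_INITIALIZED);
    }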