VirtualBox

Changeset 87529 in vbox for trunk/src/VBox/VMM/VMMR0


Ignore:
Timestamp:
Feb 2, 2021 10:56:44 AM (4 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
142554
Message:

VMM/HMVMX: Fixed leak in hmR0VmxPagesAllocZ. Eliminated gotos in caller.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r87522 r87529  
    1791 1791 *
    1792 1792 * @returns VBox status code.
    1793  * @param   hMemObj         The ring-0 memory object associated with the allocation.
     1793 * @param   phMemObj        Where to return the handle to the allocation.
    1794 1794 * @param   paAllocInfo     The pointer to the first element of the VMX
    1795 1795 *                          page-allocation info object array.
    1796 1796 * @param   cEntries        The number of elements in the @a paAllocInfo array.
    1797 1797 */
    1798 static int hmR0VmxPagesAllocZ(RTR0MEMOBJ hMemObj, PVMXPAGEALLOCINFO paAllocInfo, uint32_t cEntries)
    1799 {
     1798static int hmR0VmxPagesAllocZ(PRTR0MEMOBJ phMemObj, PVMXPAGEALLOCINFO paAllocInfo, uint32_t cEntries)
     1799{
     1800    *phMemObj = NIL_RTR0MEMOBJ;
     1801
    1800 1802    /* Figure out how many pages to allocate. */
    1801 1803    uint32_t cPages = 0;
     
    1806 1808    if (cPages)
    1807 1809    {
    1808         size_t const cbPages = cPages << X86_PAGE_4K_SHIFT;
    1809         int rc = RTR0MemObjAllocPage(&hMemObj, cbPages, false /* fExecutable */);
     1810        size_t const cbPages = cPages << PAGE_SHIFT;
     1811        int rc = RTR0MemObjAllocPage(phMemObj, cbPages, false /* fExecutable */);
    1810 1812        if (RT_FAILURE(rc))
    1811 1813            return rc;
    1812 1814
    1813 1815        /* Zero the contents and assign each page to the corresponding VMX page-allocation entry. */
    1814         void *pvFirstPage = RTR0MemObjAddress(hMemObj);
    1815         ASMMemZero32(pvFirstPage, cbPages);
     1816        void *pvFirstPage = RTR0MemObjAddress(*phMemObj);
     1817        RT_BZERO(pvFirstPage, cbPages);
    1816 1818
    1817 1819        uint32_t iPage = 0;
     
    1819 1821            if (paAllocInfo[i].fValid)
    1820 1822            {
    1821                 RTHCPHYS const HCPhysPage = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
     1823                RTHCPHYS const HCPhysPage = RTR0MemObjGetPagePhysAddr(*phMemObj, iPage);
    1822 1824                void          *pvPage     = (void *)((uintptr_t)pvFirstPage + (iPage << X86_PAGE_4K_SHIFT));
    1823 1825                Assert(HCPhysPage && HCPhysPage != NIL_RTHCPHYS);
     
    1914 1916    bool const fShadowVmcs = !fIsNstGstVmcs ? pVM->hm.s.vmx.fUseVmcsShadowing : pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing;
    1915 1917    Assert(!pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing);  /* VMCS shadowing is not yet exposed to the guest. */
    1916     VMXPAGEALLOCINFO aAllocInfo[] = {
     1918    VMXPAGEALLOCINFO aAllocInfo[] =
     1919    {
    1917 1920        { true,        0 /* Unused */, &pVmcsInfo->HCPhysVmcs,         &pVmcsInfo->pvVmcs         },
    1918 1921        { true,        0 /* Unused */, &pVmcsInfo->HCPhysGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad },
     
    1922 1925    };
    1923 1926
    1924     int rc = hmR0VmxPagesAllocZ(pVmcsInfo->hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
     1927    int rc = hmR0VmxPagesAllocZ(&pVmcsInfo->hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    1925 1928    if (RT_FAILURE(rc))
    1926 1929        return rc;
     
    2022 2025    bool const fVirtApicAccess   = RT_BOOL(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
    2023 2026    bool const fUseVmcsShadowing = pVM->hm.s.vmx.fUseVmcsShadowing;
    2024     VMXPAGEALLOCINFO aAllocInfo[] = {
     2027    VMXPAGEALLOCINFO aAllocInfo[] =
     2028    {
    2025 2029        { fVirtApicAccess,   0 /* Unused */, &pVM->hm.s.vmx.HCPhysApicAccess,    (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess },
    2026 2030        { fUseVmcsShadowing, 0 /* Unused */, &pVM->hm.s.vmx.HCPhysVmreadBitmap,  &pVM->hm.s.vmx.pvVmreadBitmap         },
     
    2031 2035    };
    2032 2036
    2033     int rc = hmR0VmxPagesAllocZ(pVM->hm.s.vmx.hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    2034     if (RT_FAILURE(rc))
    2035         goto cleanup;
    2036 
     2037    int rc = hmR0VmxPagesAllocZ(&pVM->hm.s.vmx.hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
     2038    if (RT_SUCCESS(rc))
     2039    {
    2037 2040#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2038     /* Allocate the shadow VMCS-fields array. */
    2039     if (fUseVmcsShadowing)
    2040     {
    2041         Assert(!pVM->hm.s.vmx.cShadowVmcsFields);
    2042         Assert(!pVM->hm.s.vmx.cShadowVmcsRoFields);
    2043         pVM->hm.s.vmx.paShadowVmcsFields   = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
    2044         pVM->hm.s.vmx.paShadowVmcsRoFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
    2045         if (RT_LIKELY(   pVM->hm.s.vmx.paShadowVmcsFields
    2046                       && pVM->hm.s.vmx.paShadowVmcsRoFields))
    2047         { /* likely */ }
    2048         else
    2049         {
    2050             rc = VERR_NO_MEMORY;
    2051             goto cleanup;
    2052         }
    2053     }
     2041        /* Allocate the shadow VMCS-fields array. */
     2042        if (fUseVmcsShadowing)
     2043        {
     2044            Assert(!pVM->hm.s.vmx.cShadowVmcsFields);
     2045            Assert(!pVM->hm.s.vmx.cShadowVmcsRoFields);
     2046            pVM->hm.s.vmx.paShadowVmcsFields   = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
     2047            pVM->hm.s.vmx.paShadowVmcsRoFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
     2048            if (!pVM->hm.s.vmx.paShadowVmcsFields || !pVM->hm.s.vmx.paShadowVmcsRoFields)
     2049                rc = VERR_NO_MEMORY;
     2050        }
    2054 2051#endif
    2055 2052
    2056     /*
    2057      * Allocate per-VCPU VT-x structures.
    2058      */
    2059     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    2060     {
    2061         /* Allocate the guest VMCS structures. */
    2062         PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
    2063         rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
    2064         if (RT_FAILURE(rc))
    2065             goto cleanup;
     2053        /*
     2054         * Allocate per-VCPU VT-x structures.
     2055         */
     2056        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
     2057        {
     2058            /* Allocate the guest VMCS structures. */
     2059            PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
     2060            rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
    2066 2061
    2067 2062#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2068         /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */
    2069         if (pVM->cpum.ro.GuestFeatures.fVmx)
    2070         {
    2071             rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
    2072             if (RT_FAILURE(rc))
    2073                 goto cleanup;
    2074         }
     2063            /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */
     2064            if (pVM->cpum.ro.GuestFeatures.fVmx && RT_SUCCESS(rc))
     2065                rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
    2075 2066#endif
    2076     }
    2077 
    2078     return VINF_SUCCESS;
    2079 
    2080 cleanup:
     2067        }
     2068        if (RT_SUCCESS(rc))
     2069            return VINF_SUCCESS;
     2070    }
    2081 2071    hmR0VmxStructsFree(pVM);
    2082     Assert(rc != VINF_SUCCESS);
    2083 2072    return rc;
    2084 2073}
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette