VirtualBox

Changeset 85542 in vbox for trunk


Ignore:
Timestamp:
Jul 30, 2020 9:05:38 AM (4 years ago)
Author:
vboxsync
Message:

IPRT/mp-r0drv-linux.c: Move the cpu set allocation & initialization out of the block running with preemption disabled. This hopefully fixes being on the wrong CPU when re-enabling preemption (seen once this morning after running supdrvTscMeasureDeltaOne, possibly involving the calling CPU).

Location:
trunk/src/VBox
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp

    r85527 r85542  
    52555255                                          pImage->paSegments[iSeg].fProt);
    52565256            }
     5257        if (iSeg >= pImage->cSegments)
     5258        {
     5259            supdrvLdrUnlock(pDevExt);
     5260            return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
     5261                                      "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
     5262                                      pv, pszSymbol, uRva);
     5263        }
    52575264
    52585265        if (pImage->fNative)
     
    53355342    }
    53365343
     5344    /*
     5345     * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
     5346     */
     5347    pImage->cSegments = pReq->u.In.cSegments;
     5348    {
     5349        size_t  cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
     5350        pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
     5351        if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
     5352            pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
     5353        else
     5354        {
     5355            supdrvLdrUnlock(pDevExt);
     5356            return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
     5357        }
     5358        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
     5359    }
     5360
     5361    /*
     5362     * Validate entrypoints.
     5363     */
    53375364    switch (pReq->u.In.eEPType)
    53385365    {
     
    54045431            if (!pImage->paSymbols)
    54055432                rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
    5406             SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    5407         }
    5408 
    5409         pImage->cSegments = pReq->u.In.cSegments;
    5410         if (RT_SUCCESS(rc))
    5411         {
    5412             size_t  cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
    5413             pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
    5414             if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
    5415                 pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
    5416             else
    5417                 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
    54185433            SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    54195434        }
  • trunk/src/VBox/HostDrivers/Support/SUPDrvInternal.h

    r85534 r85542  
    146146#endif
    147147
    148 #if 0 /*defined(RT_OS_LINUX)*/ /** @todo make everyone do this */
     148#if defined(RT_OS_LINUX) /** @todo make everyone do this */
    149149/** Use the RTR0MemObj API rather than the RTMemExecAlloc for the images.
    150150 * This is a good idea in general, but a necessity for @bugref{9801}. */
  • trunk/src/VBox/HostDrivers/Support/SUPLibLdr.cpp

    r85526 r85542  
    359359    RT_NOREF(hLdrMod);
    360360
     361    Log2(("supLoadModuleCompileSegmentsCB: %RTptr/%RTptr LB %RTptr/%RTptr prot %#x %s\n",
     362          pSeg->LinkAddress, pSeg->RVA, pSeg->cbMapped, pSeg->cb, pSeg->fProt, pSeg->pszName));
     363
    361364    /* Ignore segments not part of the loaded image. */
    362365    if (pSeg->RVA == NIL_RTLDRADDR || pSeg->cbMapped == 0)
     366    {
     367        Log2(("supLoadModuleCompileSegmentsCB: -> skipped\n"));
    363368        return VINF_SUCCESS;
     369    }
    364370
    365371    /* We currently ASSUME that all relevant segments are in ascending RVA order. */
     
    374380    AssertReturn(pSeg->RVA      < _1G, VERR_INTERNAL_ERROR_3);
    375381    uint32_t uRvaSeg  = (uint32_t)pSeg->RVA;
    376     Log2(("supLoadModuleCompileSegmentsCB: %RTptr/%RTptr LB %RTptr/%RTptr prot %#x %s\n",
    377           pSeg->LinkAddress, pSeg->RVA, pSeg->cbMapped, pSeg->cb, pSeg->fProt, pSeg->pszName));
    378382
    379383    /*
  • trunk/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c

    r83326 r85542  
    383383#ifdef CONFIG_SMP
    384384    IPRT_LINUX_SAVE_EFL_AC();
    385     int rc;
    386     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     385    int                     rc;
     386    RTTHREADPREEMPTSTATE    PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     387# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
     388    cpumask_var_t           DstCpuMask;
     389# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
     390    cpumask_t               DstCpuMask;
     391# endif
    387392
    388393    AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
    389394    AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
     395
     396    /*
     397     * Prepare the CPU mask before we disable preemption.
     398     */
     399# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
     400    if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
     401        return VERR_NO_MEMORY;
     402    cpumask_set_cpu(idCpu1, DstCpuMask);
     403    cpumask_set_cpu(idCpu2, DstCpuMask);
     404# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
     405    if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
     406        return VERR_NO_MEMORY;
     407    cpumask_clear(DstCpuMask);
     408    cpumask_set_cpu(idCpu1, DstCpuMask);
     409    cpumask_set_cpu(idCpu2, DstCpuMask);
     410# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
     411    cpus_clear(DstCpuMask);
     412    cpu_set(idCpu1, DstCpuMask);
     413    cpu_set(idCpu2, DstCpuMask);
     414# endif
    390415
    391416    /*
     
    402427         * call wait ourselves.
    403428         */
    404 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
    405         /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
    406         cpumask_var_t DstCpuMask;
    407 # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    408         cpumask_t   DstCpuMask;
    409 # endif
    410429        RTCPUID     idCpuSelf = RTMpCpuId();
    411430        bool const  fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
     
    417436        Args.idCpu2  = idCpu2;
    418437        Args.cHits   = 0;
    419 
    420 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
    421         if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
    422             return VERR_NO_MEMORY;
    423         cpumask_set_cpu(idCpu1, DstCpuMask);
    424         cpumask_set_cpu(idCpu2, DstCpuMask);
    425 # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
    426         if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
    427             return VERR_NO_MEMORY;
    428         cpumask_clear(DstCpuMask);
    429         cpumask_set_cpu(idCpu1, DstCpuMask);
    430         cpumask_set_cpu(idCpu2, DstCpuMask);
    431 # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    432         cpus_clear(DstCpuMask);
    433         cpu_set(idCpu1, DstCpuMask);
    434         cpu_set(idCpu2, DstCpuMask);
    435 # endif
    436438
    437439# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
     
    468470        else
    469471            rc = VERR_CPU_IPE_1;
    470 
    471 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
    472         free_cpumask_var(DstCpuMask);
    473 # endif
    474472    }
    475473    /*
     
    481479    else
    482480        rc = VERR_CPU_NOT_FOUND;
     481
    483482    RTThreadPreemptRestore(&PreemptState);;
     483# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
     484    free_cpumask_var(DstCpuMask);
     485# endif
    484486    IPRT_LINUX_RESTORE_EFL_AC();
    485487    return rc;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette