- Timestamp: Jul 30, 2020 9:05:38 AM
- Location: trunk/src/VBox
- Files: 4 edited
trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp
r85527 → r85542

@@ -5255,4 +5255,11 @@
                                   pImage->paSegments[iSeg].fProt);
          }
+         if (iSeg >= pImage->cSegments)
+         {
+             supdrvLdrUnlock(pDevExt);
+             return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
+                                       "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
+                                       pv, pszSymbol, uRva);
+         }

          if (pImage->fNative)
@@ -5335,3 +5342,23 @@
      }

+     /*
+      * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
+      */
+     pImage->cSegments = pReq->u.In.cSegments;
+     {
+         size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
+         pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
+         if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
+             pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
+         else
+         {
+             supdrvLdrUnlock(pDevExt);
+             return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
+         }
+         SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
+     }
+
+     /*
+      * Validate entrypoints.
+      */
      switch (pReq->u.In.eEPType)
@@ -5404,16 +5431,4 @@
          if (!pImage->paSymbols)
              rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
-         SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
-     }
-
-     pImage->cSegments = pReq->u.In.cSegments;
-     if (RT_SUCCESS(rc))
-     {
-         size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
-         pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
-         if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
-             pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
-         else
-             rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
          SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
      }
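Taken together, these hunks copy the segment table before the entry points are validated, so the loader can reject an entry point whose RVA lands outside every segment (the new VERR_INVALID_PARAMETER path in the first hunk). A minimal standalone sketch of that kind of lookup, with a hypothetical helper name and the segment descriptor reduced to the two fields the check needs:

#include <stdint.h>

/* Reduced stand-in for the driver's segment descriptor (sketch only). */
typedef struct SKETCHSEG
{
    uint32_t off;   /* RVA where the segment starts. */
    uint32_t cb;    /* Size of the segment in bytes. */
} SKETCHSEG;

/* Returns the index of the segment containing uRva, or cSegments if none
 * does -- the case the changeset now reports as a bad entry point. */
static uint32_t findSegmentForRva(const SKETCHSEG *paSegments, uint32_t cSegments, uint32_t uRva)
{
    uint32_t iSeg;
    for (iSeg = 0; iSeg < cSegments; iSeg++)
        if (uRva - paSegments[iSeg].off < paSegments[iSeg].cb) /* unsigned wrap turns this into a range check */
            return iSeg;
    return cSegments;
}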
trunk/src/VBox/HostDrivers/Support/SUPDrvInternal.h
r85534 → r85542

@@ -146,5 +146,5 @@
  #endif

- #if 0 /*defined(RT_OS_LINUX)*/ /** @todo make everyone do this */
+ #if defined(RT_OS_LINUX) /** @todo make everyone do this */
  /** Use the RTR0MemObj API rather than the RTMemExecAlloc for the images.
   * This is a good idea in general, but a necessity for @bugref{9801}. */
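This hunk turns a previously disabled compile-time switch on for Linux: loader images are now allocated through the RTR0MemObj API instead of RTMemExecAlloc, which the comment ties to @bugref{9801}. A sketch of the resulting pattern; the macro name below is assumed for illustration (the #define itself sits just outside the quoted context):

#if defined(RT_OS_LINUX)
/* Assumed macro name -- not visible in this hunk. */
# define SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
#endif

#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
    /* Image memory via RTR0MemObj: page-backed allocation, protections
     * applied later with RTR0MemObjProtect (hence the PAGE_SIZE alignment
     * of the last segment in the SUPDrv.cpp change above). */
#else
    /* Legacy path: one executable blob from RTMemExecAlloc. */
#endif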
trunk/src/VBox/HostDrivers/Support/SUPLibLdr.cpp
r85526 → r85542

@@ -359,7 +359,13 @@
      RT_NOREF(hLdrMod);

+     Log2(("supLoadModuleCompileSegmentsCB: %RTptr/%RTptr LB %RTptr/%RTptr prot %#x %s\n",
+           pSeg->LinkAddress, pSeg->RVA, pSeg->cbMapped, pSeg->cb, pSeg->fProt, pSeg->pszName));
+
      /* Ignore segments not part of the loaded image. */
      if (pSeg->RVA == NIL_RTLDRADDR || pSeg->cbMapped == 0)
+     {
+         Log2(("supLoadModuleCompileSegmentsCB: -> skipped\n"));
          return VINF_SUCCESS;
+     }

      /* We currently ASSUME that all relevant segments are in ascending RVA order. */
@@ -374,6 +380,4 @@
      AssertReturn(pSeg->RVA < _1G, VERR_INTERNAL_ERROR_3);
      uint32_t uRvaSeg = (uint32_t)pSeg->RVA;
-     Log2(("supLoadModuleCompileSegmentsCB: %RTptr/%RTptr LB %RTptr/%RTptr prot %#x %s\n",
-           pSeg->LinkAddress, pSeg->RVA, pSeg->cbMapped, pSeg->cb, pSeg->fProt, pSeg->pszName));

      /*
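The Log2 trace moves from the middle of the callback to its entry, so segments that are filtered out still show up in the log, and the filtered case now logs an explicit "skipped" line. A reduced sketch of the resulting callback shape (hypothetical function name; the real callback does considerably more work):

#include <iprt/ldr.h>
#include <iprt/log.h>
#include <iprt/errcore.h>

/* FNRTLDRENUMSEGS-shaped callback: log every segment first, then skip the
 * ones that are not part of the loaded image. */
static DECLCALLBACK(int) sketchSegmentsCB(RTLDRMOD hLdrMod, PCRTLDRSEG pSeg, void *pvUser)
{
    RT_NOREF(hLdrMod, pvUser);
    Log2(("sketchSegmentsCB: %RTptr/%RTptr LB %RTptr/%RTptr prot %#x %s\n",
          pSeg->LinkAddress, pSeg->RVA, pSeg->cbMapped, pSeg->cb, pSeg->fProt, pSeg->pszName));

    /* Ignore segments not part of the loaded image, but say so in the log. */
    if (pSeg->RVA == NIL_RTLDRADDR || pSeg->cbMapped == 0)
    {
        Log2(("sketchSegmentsCB: -> skipped\n"));
        return VINF_SUCCESS;
    }

    /* ... process the segment ... */
    return VINF_SUCCESS;
}

Such a callback is driven via RTLdrEnumSegments(hLdrMod, sketchSegmentsCB, NULL).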
trunk/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c
r83326 → r85542

@@ -383,9 +383,34 @@
  #ifdef CONFIG_SMP
      IPRT_LINUX_SAVE_EFL_AC();
-     int rc;
-     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+     int                     rc;
+     RTTHREADPREEMPTSTATE    PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+ # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
+     cpumask_var_t           DstCpuMask;
+ # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+     cpumask_t               DstCpuMask;
+ # endif

      AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
      AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
+
+     /*
+      * Prepare the CPU mask before we disable preemption.
+      */
+ # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
+     if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
+         return VERR_NO_MEMORY;
+     cpumask_set_cpu(idCpu1, DstCpuMask);
+     cpumask_set_cpu(idCpu2, DstCpuMask);
+ # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+     if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
+         return VERR_NO_MEMORY;
+     cpumask_clear(DstCpuMask);
+     cpumask_set_cpu(idCpu1, DstCpuMask);
+     cpumask_set_cpu(idCpu2, DstCpuMask);
+ # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+     cpus_clear(DstCpuMask);
+     cpu_set(idCpu1, DstCpuMask);
+     cpu_set(idCpu2, DstCpuMask);
+ # endif

      /*
@@ -402,10 +427,4 @@
       * call wait ourselves.
       */
- # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
-     /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
-     cpumask_var_t DstCpuMask;
- # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
-     cpumask_t DstCpuMask;
- # endif
      RTCPUID idCpuSelf = RTMpCpuId();
      bool const fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
@@ -417,21 +436,4 @@
      Args.idCpu2 = idCpu2;
      Args.cHits = 0;
-
- # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
-     if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
-         return VERR_NO_MEMORY;
-     cpumask_set_cpu(idCpu1, DstCpuMask);
-     cpumask_set_cpu(idCpu2, DstCpuMask);
- # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
-     if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
-         return VERR_NO_MEMORY;
-     cpumask_clear(DstCpuMask);
-     cpumask_set_cpu(idCpu1, DstCpuMask);
-     cpumask_set_cpu(idCpu2, DstCpuMask);
- # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
-     cpus_clear(DstCpuMask);
-     cpu_set(idCpu1, DstCpuMask);
-     cpu_set(idCpu2, DstCpuMask);
- # endif

  # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
@@ -468,8 +470,4 @@
      else
          rc = VERR_CPU_IPE_1;
-
- # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
-     free_cpumask_var(DstCpuMask);
- # endif
  }
  /*
@@ -481,5 +479,9 @@
      else
          rc = VERR_CPU_NOT_FOUND;
+
      RTThreadPreemptRestore(&PreemptState);;
+ # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+     free_cpumask_var(DstCpuMask);
+ # endif
      IPRT_LINUX_RESTORE_EFL_AC();
      return rc;
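The substance of this change is ordering: zalloc_cpumask_var(..., GFP_KERNEL) may sleep, so the CPU mask must be allocated and filled before preemption is disabled, and it must be freed on every exit path after preemption is restored (previously the free sat inside only one branch). A stripped-down sketch of that rule in plain Linux kernel terms; the function name is hypothetical and the cross-call body is elided:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/preempt.h>
#include <linux/errno.h>

static int sketch_on_cpu_pair(unsigned int idCpu1, unsigned int idCpu2)
{
    cpumask_var_t DstCpuMask;
    int rc = 0;

    /* May sleep: must happen while preemption is still enabled. */
    if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
        return -ENOMEM;
    cpumask_set_cpu(idCpu1, DstCpuMask);
    cpumask_set_cpu(idCpu2, DstCpuMask);

    preempt_disable();
    /* ... fire the cross calls on the CPUs in DstCpuMask ... */
    preempt_enable();

    /* Single cleanup point, reached on every path, after preemption is restored. */
    free_cpumask_var(DstCpuMask);
    return rc;
}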