Changeset 106634 in vbox
Timestamp:
    Oct 23, 2024, 8:33:54 PM (5 weeks ago)
Location:
    trunk/src/VBox/HostDrivers/Support
Files:
    2 edited
trunk/src/VBox/HostDrivers/Support/Makefile.kmk
(r106625 → r106634)

@@ -614 +614 @@
 		| $$(dir $$@)
 	$(SED) \
+		-e '/not-arch-$(KBUILD_TARGET_ARCH)/d' \
 		-f $(dir $<)/SUPR0-def-$(VBOX_LDR_FMT).sed \
 		--output $@ \
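The new -e '/not-arch-$(KBUILD_TARGET_ARCH)/d' expression runs ahead of the existing SUPR0-def-$(VBOX_LDR_FMT).sed script and deletes every line of the generated export definition whose tag names the build's target architecture as excluded; on an arm64 build it expands to /not-arch-arm64/d. This appears to be why the export-table annotations in SUPDrv.cpp below switch from /* only-amd64, only-x86 */ to /* not-arch-arm64 */: a deny-list tag naming the one excluded architecture can be matched by this single parameterised expression, whereas allow-list tags would need a separate expression per supported architecture. Roughly, assuming the tags survive into the generated list (the line format shown here is hypothetical, for illustration only):

    SUPR0EnableVTx   ; not-arch-arm64     <- deleted when KBUILD_TARGET_ARCH is arm64
    SUPGetGIP                             <- kept on every architecture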
trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp
(r106625 → r106634)

@@ -253 +253 @@
     /* Entries with absolute addresses determined at runtime, fixup
        code makes ugly ASSUMPTIONS about the order here: */
-    SUPEXP_CUSTOM(      0,  SUPR0AbsIs64bit,        0),  /* only-amd64, only-x86 */
-    SUPEXP_CUSTOM(      0,  SUPR0Abs64bitKernelCS,  0),  /* only-amd64, only-x86 */
-    SUPEXP_CUSTOM(      0,  SUPR0Abs64bitKernelSS,  0),  /* only-amd64, only-x86 */
-    SUPEXP_CUSTOM(      0,  SUPR0Abs64bitKernelDS,  0),  /* only-amd64, only-x86 */
-    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelCS,       0),  /* only-amd64, only-x86 */
-    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelSS,       0),  /* only-amd64, only-x86 */
-    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelDS,       0),  /* only-amd64, only-x86 */
-    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelES,       0),  /* only-amd64, only-x86 */
-    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelFS,       0),  /* only-amd64, only-x86 */
-    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelGS,       0),  /* only-amd64, only-x86 */
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+    SUPEXP_CUSTOM(      0,  SUPR0AbsIs64bit,        0),  /* not-arch-arm64 */
+    SUPEXP_CUSTOM(      0,  SUPR0Abs64bitKernelCS,  0),  /* not-arch-arm64 */
+    SUPEXP_CUSTOM(      0,  SUPR0Abs64bitKernelSS,  0),  /* not-arch-arm64 */
+    SUPEXP_CUSTOM(      0,  SUPR0Abs64bitKernelDS,  0),  /* not-arch-arm64 */
+    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelCS,       0),  /* not-arch-arm64 */
+    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelSS,       0),  /* not-arch-arm64 */
+    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelDS,       0),  /* not-arch-arm64 */
+    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelES,       0),  /* not-arch-arm64 */
+    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelFS,       0),  /* not-arch-arm64 */
+    SUPEXP_CUSTOM(      0,  SUPR0AbsKernelGS,       0),  /* not-arch-arm64 */
+#endif
     /* Normal function & data pointers: */
-    SUPEXP_CUSTOM(      0,  g_pSUPGlobalInfoPage,   &g_pSUPGlobalInfoPage),
+    SUPEXP_CUSTOM(      0,  g_pSUPGlobalInfoPage,   &g_pSUPGlobalInfoPage), /* SED: DATA */
     SUPEXP_STK_OKAY(    0,  SUPGetGIP),
     SUPEXP_STK_BACK(    1,  SUPReadTscWithDelta),

@@ -277 +279 @@
     SUPEXP_STK_BACK(    5,  SUPR0ContAlloc),
     SUPEXP_STK_BACK(    2,  SUPR0ContFree),
-    SUPEXP_STK_BACK(    2,  SUPR0ChangeCR4),          /* only-amd64, only-x86 */
-    SUPEXP_STK_BACK(    1,  SUPR0EnableVTx),          /* only-amd64, only-x86 */
+    SUPEXP_STK_OKAY(    0,  SUPR0GetKernelFeatures),
+    SUPEXP_STK_BACK(    0,  SUPR0GetPagingMode),
     SUPEXP_STK_OKAY(    1,  SUPR0FpuBegin),
     SUPEXP_STK_OKAY(    1,  SUPR0FpuEnd),
-    SUPEXP_STK_BACK(    0,  SUPR0SuspendVTxOnCpu),    /* only-amd64, only-x86 */
-    SUPEXP_STK_BACK(    1,  SUPR0ResumeVTxOnCpu),     /* only-amd64, only-x86 */
-    SUPEXP_STK_OKAY(    1,  SUPR0GetCurrentGdtRw),    /* only-amd64, only-x86 */
-    SUPEXP_STK_OKAY(    0,  SUPR0GetKernelFeatures),
-    SUPEXP_STK_BACK(    3,  SUPR0GetHwvirtMsrs),      /* only-amd64, only-x86 */
-    SUPEXP_STK_BACK(    0,  SUPR0GetPagingMode),
-    SUPEXP_STK_BACK(    1,  SUPR0GetSvmUsability),    /* only-amd64, only-x86 */
-    SUPEXP_STK_BACK(    1,  SUPR0GetVTSupport),       /* only-amd64, only-x86 */
-    SUPEXP_STK_BACK(    1,  SUPR0GetVmxUsability),    /* only-amd64, only-x86 */
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+    SUPEXP_STK_BACK(    2,  SUPR0ChangeCR4),          /* not-arch-arm64 */
+    SUPEXP_STK_BACK(    1,  SUPR0EnableVTx),          /* not-arch-arm64 */
+    SUPEXP_STK_BACK(    0,  SUPR0SuspendVTxOnCpu),    /* not-arch-arm64 */
+    SUPEXP_STK_BACK(    1,  SUPR0ResumeVTxOnCpu),     /* not-arch-arm64 */
+    SUPEXP_STK_OKAY(    1,  SUPR0GetCurrentGdtRw),    /* not-arch-arm64 */
+    SUPEXP_STK_BACK(    3,  SUPR0GetHwvirtMsrs),      /* not-arch-arm64 */
+    SUPEXP_STK_BACK(    1,  SUPR0GetSvmUsability),    /* not-arch-arm64 */
+    SUPEXP_STK_BACK(    1,  SUPR0GetVTSupport),       /* not-arch-arm64 */
+    SUPEXP_STK_BACK(    1,  SUPR0GetVmxUsability),    /* not-arch-arm64 */
+#endif
     SUPEXP_STK_BACK(    2,  SUPR0LdrIsLockOwnerByMod),
     SUPEXP_STK_BACK(    1,  SUPR0LdrLock),

@@ -2411 +2415 @@
         }

-        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
-        {
-            /* validate */
-            PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
-            REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
-
-            /* execute */
-            pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
-            if (RT_FAILURE(pReq->Hdr.rc))
-                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
-            return 0;
-        }
-
         case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
         {

@@ -2545 +2536 @@
         }

+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+
+        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
+        {
+            /* validate */
+            PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
+            REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
+
+            /* execute */
+            pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
+            if (RT_FAILURE(pReq->Hdr.rc))
+                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
+            return 0;
+        }
+
         case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
         {
…
             return 0;
         }
+
+#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */

         default:

@@ -2646 +2654 @@
         }

+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
         case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
         {
…
             return 0;
         }
+#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */

         default:

@@ -4225 +4235 @@


+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+
 /**
  * Change CR4 and take care of the kernel CR4 shadow if applicable.
…
 SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
 {
-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
 # ifdef RT_OS_LINUX
     return supdrvOSChangeCR4(fOrMask, fAndMask);
…
     return uOld;
 # endif
-#else
-    RT_NOREF(fOrMask, fAndMask);
-    return RTCCUINTREG_MAX;
-#endif
 }
 SUPR0_EXPORT_SYMBOL(SUPR0ChangeCR4);

@@ -4270 +4277 @@
 SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
 {
-#if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
+# if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
     return supdrvOSEnableVTx(fEnable);
-#else
+# else
     RT_NOREF1(fEnable);
     return VERR_NOT_SUPPORTED;
-#endif
+# endif
 }
 SUPR0_EXPORT_SYMBOL(SUPR0EnableVTx);

@@ -4289 +4296 @@
 SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
 {
-#if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
+# if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
     return supdrvOSSuspendVTxOnCpu();
-#else
+# else
     return false;
-#endif
+# endif
 }
 SUPR0_EXPORT_SYMBOL(SUPR0SuspendVTxOnCpu);

@@ -4307 +4314 @@
 SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
 {
-#if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
+# if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
     supdrvOSResumeVTxOnCpu(fSuspended);
-#else
+# else
     RT_NOREF1(fSuspended);
     Assert(!fSuspended);
-#endif
+# endif
 }
 SUPR0_EXPORT_SYMBOL(SUPR0ResumeVTxOnCpu);

@@ -4319 +4326 @@
 SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
 {
-#if defined(RT_OS_LINUX) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
+# if defined(RT_OS_LINUX) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
     return supdrvOSGetCurrentGdtRw(pGdtRw);
-#else
+# else
     NOREF(pGdtRw);
     return VERR_NOT_IMPLEMENTED;
-#endif
+# endif
 }
 SUPR0_EXPORT_SYMBOL(SUPR0GetCurrentGdtRw);

@@ -4341 +4348 @@
     *pfCaps = 0;

-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
     if (ASMHasCpuId())
…
         }
     }
-#endif
     return VERR_UNSUPPORTED_CPU;
 }

@@ -4414 +4419 @@
 SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
 {
-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     uint64_t fFeatMsr;
     bool fMaybeSmxMode;
…
     uint32_t fFeaturesECX, uDummy;
-#ifdef VBOX_STRICT
+# ifdef VBOX_STRICT
     /* Callers should have verified these at some point. */
     uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
…
           || RTX86IsViaCentaurCpu(uVendorEBX, uVendorECX, uVendorEDX)
           || RTX86IsShanghaiCpu(  uVendorEBX, uVendorECX, uVendorEDX));
-#endif
+# endif
     ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
     bool fSmxVmxHwSupport = false;
…
     return rc;
-
-#else  /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
-    if (pfIsSmxModeAmbiguous)
-        *pfIsSmxModeAmbiguous = false;
-    return VERR_UNSUPPORTED_CPU;
-#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
 }
 SUPR0_EXPORT_SYMBOL(SUPR0GetVmxUsability);

@@ -4542 +4540 @@
 SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
 {
-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     int rc;
     uint64_t fVmCr;
…
         rc = VERR_SVM_DISABLED;
     return rc;
-
-#else  /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
-    RT_NOREF(fInitSvm);
-    return VERR_UNSUPPORTED_CPU;
-#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
 }
 SUPR0_EXPORT_SYMBOL(SUPR0GetSvmUsability);

@@ -4586 +4578 @@


-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
 /**
  * Queries the AMD-V and VT-x capabilities of the calling CPU.
…
     return rc;
 }
-#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */

@@ -4701 +4691 @@
     AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);

-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     /*
      * Call common worker.
      */
     return supdrvQueryVTCapsInternal(pfCaps);
-#else
-    return VERR_UNSUPPORTED_CPU;
-#endif
 }
 SUPR0_EXPORT_SYMBOL(SUPR0QueryVTCaps);

@@ -4714 +4700 @@

-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
 /**
  * Queries the CPU microcode revision.
…
     return rc;
 }
-#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */

@@ -4804 +4788 @@
      * Call common worker.
      */
-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     return supdrvQueryUcodeRev(puRevision);
-#else
-    return VERR_UNSUPPORTED_CPU;
-#endif
 }
 SUPR0_EXPORT_SYMBOL(SUPR0QueryUcodeRev);

@@ -4825 +4805 @@
 SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
 {
-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     int rc;
     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
…
     return rc;
-
-#else  /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
-    RT_NOREF(pMsrs, fCaps, fForce);
-    return VERR_UNSUPPORTED_CPU;
+}
+SUPR0_EXPORT_SYMBOL(SUPR0GetHwvirtMsrs);
+
 #endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
-}
-SUPR0_EXPORT_SYMBOL(SUPR0GetHwvirtMsrs);

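Net effect of the SUPDrv.cpp hunks: instead of compiling the x86-only ioctl cases and SUPR0* bodies on every architecture and failing at run time with VERR_UNSUPPORTED_CPU, the code is now compiled out entirely where it cannot work, so an arm64 build rejects SUP_IOCTL_VT_CAPS through the dispatcher's default branch like any other unknown function. A minimal self-contained sketch of that pattern (the MY_IOCTL_* request codes and return values are hypothetical, not the real SUPDrv interface; RT_ARCH_AMD64/RT_ARCH_X86 are the IPRT target-architecture macros, undefined outside a VBox x86/amd64 build):

    #include <stdint.h>

    #define MY_IOCTL_QUERY_GIP  1   /* hypothetical code, available everywhere */
    #define MY_IOCTL_VT_CAPS    2   /* hypothetical code, x86/amd64 builds only */

    static int myIoctlDispatch(uint32_t uFunction)
    {
        switch (uFunction)
        {
            case MY_IOCTL_QUERY_GIP:
                return 0;           /* handled on every architecture */

    #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            /* On any other architecture this case does not exist at all;
               the compiler never sees the x86-specific code behind it. */
            case MY_IOCTL_VT_CAPS:
                return 0;
    #endif

            default:
                return -1;          /* unknown function: caller gets an error */
        }
    }

    int main(void)
    {
        /* On a non-x86 build, MY_IOCTL_VT_CAPS would fall through to default. */
        return myIoctlDispatch(MY_IOCTL_QUERY_GIP) == 0 ? 0 : 1;
    }

The same move is applied to the exported helpers: the per-function #else stubs returning VERR_UNSUPPORTED_CPU (or RTCCUINTREG_MAX) are deleted, and a single file-wide #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) … #endif now brackets everything from SUPR0ChangeCR4 through SUPR0GetHwvirtMsrs. That also keeps those symbols out of non-x86 driver builds, matching the new sed filter in the Makefile, which drops the corresponding not-arch-* entries from the export list.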