Changeset 78869 in vbox for trunk/src/VBox/VMM
- Timestamp: May 30, 2019 8:32:28 AM
- svn:sync-xref-src-repo-rev: 130981
- Location: trunk/src/VBox/VMM
- Files: 3 edited
Legend:
- Unmodified lines are shown with a leading space
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r78868 → r78869:

     {
         PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
+        Assert(pVmcs);
         if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
             return uTicks + pVmcs->u64TscOffset.u;
…
     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMHasGuestSvmVmcbCached to save a call. */
-        if (!HMHasGuestSvmVmcbCached(pVCpu))
+        uint64_t u64TscOffset;
+        if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
         {
             PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-            return uTicks + pVmcb->ctrl.u64TSCOffset;
+            Assert(pVmcb);
+            u64TscOffset = pVmcb->ctrl.u64TSCOffset;
         }
-        return HMApplySvmNstGstTscOffset(pVCpu, uTicks);
+        return uTicks + u64TscOffset;
     }
 #else
…
     {
         PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
+        Assert(pVmcs);
         return uTicks - pVmcs->u64TscOffset.u;
     }
…
     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMRemoveSvmNstGstTscOffset to save a call. */
-        if (!HMHasGuestSvmVmcbCached(pVCpu))
+        uint64_t u64TscOffset;
+        if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
         {
             PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-            return uTicks - pVmcb->ctrl.u64TSCOffset;
+            Assert(pVmcb);
+            u64TscOffset = pVmcb->ctrl.u64TSCOffset;
         }
-        return HMRemoveSvmNstGstTscOffset(pVCpu, uTicks);
+        return uTicks - u64TscOffset;
     }
 #else
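The change above replaces assert-on-invalid-cache helpers with a getter that reports whether the HM VMCB cache holds a value, so CPUM can fall back to the nested-guest VMCB field itself. Below is a minimal standalone C sketch of that "ask the cache, fall back to the source" shape; all types and names (TscCache, Vmcb, tscCacheGet, applyTscOffset) are illustrative stand-ins, not VirtualBox APIs.

/* Standalone model of the cache-getter-with-fallback pattern used above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct TscCache { bool fValid; uint64_t off; } TscCache;  /* stands in for SVMNESTEDVMCBCACHE */
typedef struct Vmcb     { uint64_t u64TSCOffset; }      Vmcb;     /* stands in for the guest VMCB ctrl area */

/* New-style getter: returns false when nothing is cached and leaves the
   output untouched, instead of asserting cache validity. */
static bool tscCacheGet(const TscCache *pCache, uint64_t *pu64Off)
{
    if (pCache->fValid)
    {
        *pu64Off = pCache->off;
        return true;
    }
    return false;
}

/* Caller falls back to the VMCB field when the cache is cold, mirroring
   the rewritten SVM branch of CPUMApplyNestedGuestTscOffset above. */
static uint64_t applyTscOffset(const TscCache *pCache, const Vmcb *pVmcb, uint64_t uTicks)
{
    uint64_t u64TscOffset;
    if (!tscCacheGet(pCache, &u64TscOffset))
        u64TscOffset = pVmcb->u64TSCOffset;
    return uTicks + u64TscOffset;
}

int main(void)
{
    TscCache cold = { false, 0 }, warm = { true, 500 };
    Vmcb     vmcb = { 100 };
    printf("%llu %llu\n",
           (unsigned long long)applyTscOffset(&cold, &vmcb, 1000),   /* 1100: VMCB value used */
           (unsigned long long)applyTscOffset(&warm, &vmcb, 1000));  /* 1500: cached value used */
    return 0;
}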
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r78866 → r78869:

     bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;
     return fVGif && fUseVGif;
 }
-
-
-/**
- * Applies the TSC offset of an SVM nested-guest if any and returns the new TSC
- * value for the nested-guest.
- *
- * @returns The TSC offset after applying any nested-guest TSC offset.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uTicks  The guest TSC.
- *
- * @remarks This function looks at the VMCB cache rather than directly at the
- *          nested-guest VMCB. The latter may have been modified for executing
- *          using hardware-assisted SVM.
- *
- * @sa      CPUMRemoveNestedGuestTscOffset, HMRemoveSvmNstGstTscOffset.
- */
-VMM_INT_DECL(uint64_t) HMApplySvmNstGstTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
-{
-    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    Assert(pVmcbNstGstCache->fCacheValid);
-    return uTicks + pVmcbNstGstCache->u64TSCOffset;
-}
-
-
-/**
- * Removes the TSC offset of an SVM nested-guest if any and returns the new TSC
- * value for the guest.
- *
- * @returns The TSC offset after removing any nested-guest TSC offset.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uTicks  The nested-guest TSC.
- *
- * @remarks This function looks at the VMCB cache rather than directly at the
- *          nested-guest VMCB. The latter may have been modified for executing
- *          using hardware-assisted SVM.
- *
- * @sa      CPUMApplyNestedGuestTscOffset, HMApplySvmNstGstTscOffset.
- */
-VMM_INT_DECL(uint64_t) HMRemoveSvmNstGstTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
-{
-    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    Assert(pVmcbNstGstCache->fCacheValid);
-    return uTicks - pVmcbNstGstCache->u64TSCOffset;
-}
…
-/**
- * Returns whether HM has cached the nested-guest VMCB.
- *
- * If the VMCB is cached by HM, it means HM may have potentially modified the
- * VMCB for execution using hardware-assisted SVM.
- *
- * @returns true if HM has cached the nested-guest VMCB, false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- */
-VMM_INT_DECL(bool) HMHasGuestSvmVmcbCached(PCVMCPU pVCpu)
-{
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return pVmcbNstGstCache->fCacheValid;
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
- * active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   fIntercept  The SVM control/instruction intercept, see
- *                      SVM_CTRL_INTERCEPT_*.
- */
-VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PCVMCPU pVCpu, uint64_t fIntercept)
-{
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u64InterceptCtrl & fIntercept);
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified CR read intercept active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uCr     The CR register number (0 to 15).
- */
-VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PCVMCPU pVCpu, uint8_t uCr)
-{
-    Assert(uCr < 16);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdCRx & (1 << uCr));
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified CR write intercept active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uCr     The CR register number (0 to 15).
- */
-VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PCVMCPU pVCpu, uint8_t uCr)
-{
-    Assert(uCr < 16);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrCRx & (1 << uCr));
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified DR read intercept active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uDr     The DR register number (0 to 15).
- */
-VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PCVMCPU pVCpu, uint8_t uDr)
-{
-    Assert(uDr < 16);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdDRx & (1 << uDr));
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified DR write intercept active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uDr     The DR register number (0 to 15).
- */
-VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PCVMCPU pVCpu, uint8_t uDr)
-{
-    Assert(uDr < 16);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrDRx & (1 << uDr));
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified exception intercept active.
- *
- * @returns true if in intercept is active, false otherwise.
- * @param   pVCpu    The cross context virtual CPU structure of the calling EMT.
- * @param   uVector  The exception / interrupt vector.
- */
-VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PCVMCPU pVCpu, uint8_t uVector)
-{
-    Assert(uVector < 32);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u32InterceptXcpt & (1 << uVector));
-}
+/**
+ * Gets the SVM nested-guest control intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT.
+ * @param   pu64Intercepts  Where to store the control intercepts. Only
+ *                          updated when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmCtrlIntercepts(PCVMCPU pVCpu, uint64_t *pu64Intercepts)
+{
+    Assert(pu64Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu64Intercepts = pVmcbNstGstCache->u64InterceptCtrl;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest CRx-read intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT.
+ * @param   pu16Intercepts  Where to store the CRx-read intercepts. Only
+ *                          updated when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmReadCRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
+{
+    Assert(pu16Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16Intercepts = pVmcbNstGstCache->u16InterceptRdCRx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest CRx-write intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT.
+ * @param   pu16Intercepts  Where to store the CRx-write intercepts. Only
+ *                          updated when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmWriteCRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
+{
+    Assert(pu16Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16Intercepts = pVmcbNstGstCache->u16InterceptWrCRx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest DRx-read intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT.
+ * @param   pu16Intercepts  Where to store the DRx-read intercepts. Only
+ *                          updated when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmReadDRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
+{
+    Assert(pu16Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16Intercepts = pVmcbNstGstCache->u16InterceptRdDRx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest DRx-write intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT.
+ * @param   pu16Intercepts  Where to store the DRx-write intercepts. Only
+ *                          updated when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmWriteDRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
+{
+    Assert(pu16Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16Intercepts = pVmcbNstGstCache->u16InterceptWrDRx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest exception intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT.
+ * @param   pu32Intercepts  Where to store the exception intercepts. Only
+ *                          updated when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmXcptIntercepts(PCVMCPU pVCpu, uint32_t *pu32Intercepts)
+{
+    Assert(pu32Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu32Intercepts = pVmcbNstGstCache->u32InterceptXcpt;
+        return true;
+    }
+    return false;
 }
…
 /**
  * Checks if the nested-guest VMCB has virtual-interrupts masking enabled.
  *
- * @returns true if virtual-interrupts are masked, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- */
-VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PCVMCPU pVCpu)
-{
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return pVmcbNstGstCache->fVIntrMasking;
-}
-
-
-/**
- * Checks if the nested-guest VMCB has nested-paging enabled.
- *
- * @returns true if nested-paging is enabled, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- */
-VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PCVMCPU pVCpu)
-{
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return pVmcbNstGstCache->fNestedPaging;
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT.
+ * @param   pfVIntrMasking  Where to store the virtual-interrupt masking bit.
+ *                          Updated only when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmVirtIntrMasking(PCVMCPU pVCpu, bool *pfVIntrMasking)
+{
+    Assert(pfVIntrMasking);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pfVIntrMasking = pVmcbNstGstCache->fVIntrMasking;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest nested-paging bit if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT.
+ * @param   pfNestedPaging  Where to store the nested-paging bit. Updated only
+ *                          when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmNestedPaging(PCVMCPU pVCpu, bool *pfNestedPaging)
+{
+    Assert(pfNestedPaging);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pfNestedPaging = pVmcbNstGstCache->fNestedPaging;
+        return true;
+    }
+    return false;
 }
…
 /**
  * Returns the nested-guest VMCB pause-filter count.
  *
- * @returns The pause-filter count.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- */
-VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu)
-{
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return pVmcbNstGstCache->u16PauseFilterCount;
-}
-
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu                 The cross context virtual CPU structure of
+ *                                the calling EMT.
+ * @param   pu16PauseFilterCount  Where to store the pause-filter count. Only
+ *                                updated when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, uint16_t *pu16PauseFilterCount)
+{
+    Assert(pu16PauseFilterCount);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16PauseFilterCount = pVmcbNstGstCache->u16PauseFilterCount;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Returns the SVM nested-guest TSC offset if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu          The cross context virtual CPU structure of the
+ *                         calling EMT.
+ * @param   pu64TscOffset  Where to store the TSC offset. Only updated when
+ *                         @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmTscOffset(PCVMCPU pVCpu, uint64_t *pu64TscOffset)
+{
+    Assert(pu64TscOffset);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu64TscOffset = pVmcbNstGstCache->u64TSCOffset;
+        return true;
+    }
+    return false;
+}
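The new HMGetGuestSvm* family shares one contract: the function returns @c true only when the VMCB cache is valid, and the output parameter is written only on success. A short compilable C sketch of that contract follows; the types and names (MockCache, mockGetPauseFilterCount) are stand-ins, not VirtualBox APIs.

/* Sketch of the bool-plus-out-parameter getter contract introduced above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct MockCache { bool fCacheValid; uint16_t u16PauseFilterCount; } MockCache;

/* Mirrors the HMGetGuestSvm* shape: report success, and write the out
   parameter only when the cache actually holds a value. */
static bool mockGetPauseFilterCount(const MockCache *pCache, uint16_t *pu16Count)
{
    if (pCache->fCacheValid)
    {
        *pu16Count = pCache->u16PauseFilterCount;
        return true;
    }
    return false;   /* *pu16Count deliberately left untouched */
}

int main(void)
{
    MockCache cache  = { false, 0 };
    uint16_t  cCount = 0;   /* initialize: the getter won't touch it on failure */
    if (mockGetPauseFilterCount(&cache, &cCount))
        printf("cached pause-filter count: %u\n", cCount);
    else
        printf("cache cold, caller must fall back to the VMCB value\n");
    return 0;
}

This shape lets callers such as CPUMApplyNestedGuestTscOffset handle the cache-cold case themselves instead of tripping the Assert(fCacheValid) that the old accessors carried.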
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r78707 → r78869:

     uint16_t const uGuestPauseFilterCount     = pVM->hm.s.svm.cPauseFilter;
     uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
-    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
+    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_PAUSE))
     {
         PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
…
     /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
     if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
-        uTscOffset = HMApplySvmNstGstTscOffset(pVCpu, uTscOffset);
+        uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
 #endif
…
     {
         /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMNotifySvmNstGstVmexit(). */
-        uint64_t const uGstTsc = HMRemoveSvmNstGstTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
+        uint64_t const uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
         TMCpuTickSetLastSeen(pVCpu, uGstTsc);
     }
…
      */
     PSVMVMCB pVmcbNstGst = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
+    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
     uint64_t const uExitCode = pVmcbNstGstCtrl->u64ExitCode;
…
         case SVM_EXIT_CPUID:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CPUID))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitCpuid(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_RDTSC:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSC))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitRdtsc(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_RDTSCP:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSCP))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitRdtscp(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_MONITOR:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MONITOR))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitMonitor(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_MWAIT:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MWAIT))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitMwait(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_HLT:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_HLT))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_HLT))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitHlt(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_MSR:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
             {
                 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
…
         case SVM_EXIT_IOIO:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
             {
                 void *pvIoBitmap = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap);
…
             /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, u32ErrCode, uFaultAddress);
…
         case SVM_EXIT_XCPT_UD:
         {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_UD))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_UD))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             hmR0SvmSetPendingXcptUD(pVCpu);
…
         case SVM_EXIT_XCPT_MF:
         {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_MF))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitXcptMF(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_XCPT_DB:
         {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_DB))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_DB))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmNestedExitXcptDB(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_XCPT_AC:
         {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_AC))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_AC))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitXcptAC(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_XCPT_BP:
         {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_BP))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_BP))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmNestedExitXcptBP(pVCpu, pSvmTransient);
         }
…
         {
             uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
-            if (HMIsGuestSvmReadCRxInterceptSet(pVCpu, uCr))
+            if (CPUMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitReadCRx(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_CR0_SEL_WRITE:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
         }
…
             Log4Func(("Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));

-            if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr))
+            if (CPUMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_PAUSE:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitPause(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_VINTR:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VINTR))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_FERR_FREEZE:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_FERR_FREEZE))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_INVLPG:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPG))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitInvlpg(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_WBINVD:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_WBINVD))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_WBINVD))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitWbinvd(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_INVD:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVD))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVD))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitInvd(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_RDPMC:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDPMC))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitRdpmc(pVCpu, pSvmTransient);
         }
…
         {
             uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
-            if (HMIsGuestSvmReadDRxInterceptSet(pVCpu, uDr))
+            if (CPUMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
         }
…
         {
             uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
-            if (HMIsGuestSvmWriteDRxInterceptSet(pVCpu, uDr))
+            if (CPUMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitWriteDRx(pVCpu, pSvmTransient);
         }
…
         {
             uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0;
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, uVector))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_XSETBV:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_XSETBV))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitXsetbv(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_TASK_SWITCH:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_TASK_SWITCH))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_IRET:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IRET))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IRET))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitIret(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_SHUTDOWN:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitShutdown(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_VMMCALL:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMMCALL))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitVmmCall(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_CLGI:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CLGI))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitClgi(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_STGI:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_STGI))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_STGI))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitStgi(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_VMLOAD:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMLOAD))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitVmload(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_VMSAVE:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMSAVE))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitVmsave(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_INVLPGA:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPGA))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitInvlpga(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_VMRUN:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitVmrun(pVCpu, pSvmTransient);
         }
…
         case SVM_EXIT_RSM:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RSM))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RSM))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             hmR0SvmSetPendingXcptUD(pVCpu);
…
         case SVM_EXIT_SKINIT:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SKINIT))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             hmR0SvmSetPendingXcptUD(pVCpu);
…
     /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
     if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
-        && HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF))
+        && CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
         return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_XCPT_PF, uErrCode, uFaultAddress));
 #endif
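Throughout this file the HM-private intercept predicates are replaced by CPUM-level ones that take the guest context explicitly, with pCtx hoisted once at the top of the nested #VMEXIT dispatcher. Below is a minimal standalone C model of that dispatch shape; every type, constant, and function name here is a hypothetical stand-in (only the overall structure mirrors hmR0SvmHandleExitNested above).

/* Minimal model of the nested #VMEXIT dispatch shape: hoist the context
   pointer once, then each case asks a CPUM-style predicate that receives
   the context as an explicit argument. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct MockCtx  { uint64_t fInterceptCtrl; } MockCtx;
typedef struct MockVCpu { MockCtx GstCtx; }          MockVCpu;

enum { MOCK_EXIT_CPUID = 0x72, MOCK_EXIT_HLT = 0x78 };  /* stand-in exit codes */
#define MOCK_INTERCEPT_CPUID  (UINT64_C(1) << 18)       /* stand-in intercept bits */
#define MOCK_INTERCEPT_HLT    (UINT64_C(1) << 24)

/* CPUM-style predicate: consults the explicitly passed guest context instead
   of reaching into an HM-private cache (a real implementation would prefer
   the HM cache when it is valid). */
static bool mockIsCtrlInterceptSet(const MockVCpu *pVCpu, const MockCtx *pCtx, uint64_t fIntercept)
{
    (void)pVCpu;
    return (pCtx->fInterceptCtrl & fIntercept) != 0;
}

static const char *mockHandleExitNested(MockVCpu *pVCpu, uint64_t uExitCode)
{
    const MockCtx *pCtx = &pVCpu->GstCtx;   /* hoisted once, like pCtx above */
    switch (uExitCode)
    {
        case MOCK_EXIT_CPUID:
            if (mockIsCtrlInterceptSet(pVCpu, pCtx, MOCK_INTERCEPT_CPUID))
                return "reflect #VMEXIT to the nested guest";
            return "handle CPUID in the host";
        case MOCK_EXIT_HLT:
            if (mockIsCtrlInterceptSet(pVCpu, pCtx, MOCK_INTERCEPT_HLT))
                return "reflect #VMEXIT to the nested guest";
            return "handle HLT in the host";
        default:
            return "unexpected exit";
    }
}

int main(void)
{
    MockVCpu vcpu = { { MOCK_INTERCEPT_CPUID } };
    printf("%s\n", mockHandleExitNested(&vcpu, MOCK_EXIT_CPUID));  /* reflected */
    printf("%s\n", mockHandleExitNested(&vcpu, MOCK_EXIT_HLT));    /* handled in host */
    return 0;
}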