VirtualBox

Changeset 78869 in vbox for trunk/src/VBox/VMM


Timestamp: May 30, 2019 8:32:28 AM (6 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 130981
Message:

VMM: Nested SVM: bugref:7243 Get rid of HMHasGuestSvmVmcbCached as a separate function call.
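
In practical terms, callers that previously made two HM calls (first HMHasGuestSvmVmcbCached() to ask whether the nested-guest VMCB is cached, then HMApplySvmNstGstTscOffset() or HMRemoveSvmNstGstTscOffset() to use the cached TSC offset) now make a single HMGetGuestSvmTscOffset() call that succeeds only when the cache is valid. The following is condensed from the CPUMAllRegs.cpp hunks below (the "apply" direction; the "remove" direction is identical with a minus); a second sketch after the last file shows the same fallback pattern for the new intercept getters.

    /* Before r78869: query the cache, then a second call to apply the cached offset. */
    if (!HMHasGuestSvmVmcbCached(pVCpu))
    {
        /* Not cached by HM: read the nested-guest VMCB directly. */
        PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
        return uTicks + pVmcb->ctrl.u64TSCOffset;
    }
    return HMApplySvmNstGstTscOffset(pVCpu, uTicks);

    /* After r78869: one call that returns the cached offset, with the VMCB as fallback. */
    uint64_t u64TscOffset;
    if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
    {
        PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
        Assert(pVmcb);
        u64TscOffset = pVmcb->ctrl.u64TSCOffset;
    }
    return uTicks + u64TscOffset;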

Location: trunk/src/VBox/VMM
Files: 3 edited

Legend (diff markers used below):
  (space)  Unmodified
  +        Added
  -        Removed
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

--- r78868
+++ r78869
@@ -2975 +2975 @@
     {
         PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
+        Assert(pVmcs);
         if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
             return uTicks + pVmcs->u64TscOffset.u;
     
@@ -2982 +2983 @@
     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMHasGuestSvmVmcbCached to save a call. */
-        if (!HMHasGuestSvmVmcbCached(pVCpu))
+        uint64_t u64TscOffset;
+        if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
         {
             PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-            return uTicks + pVmcb->ctrl.u64TSCOffset;
+            Assert(pVmcb);
+            u64TscOffset = pVmcb->ctrl.u64TSCOffset;
         }
-        return HMApplySvmNstGstTscOffset(pVCpu, uTicks);
+        return uTicks + u64TscOffset;
     }
 #else
     
@@ -3016 +3018 @@
         {
             PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
+            Assert(pVmcs);
             return uTicks - pVmcs->u64TscOffset.u;
         }
     
@@ -3023 +3026 @@
     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMRemoveSvmNstGstTscOffset to save a call. */
-        if (!HMHasGuestSvmVmcbCached(pVCpu))
+        uint64_t u64TscOffset;
+        if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
         {
             PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-            return uTicks - pVmcb->ctrl.u64TSCOffset;
+            Assert(pVmcb);
+            u64TscOffset = pVmcb->ctrl.u64TSCOffset;
         }
-        return HMRemoveSvmNstGstTscOffset(pVCpu, uTicks);
+        return uTicks - u64TscOffset;
     }
 #else
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

--- r78866
+++ r78869
@@ -189 +189 @@
     bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;
     return fVGif && fUseVGif;
-}
-
-
-/**
- * Applies the TSC offset of an SVM nested-guest if any and returns the new TSC
- * value for the nested-guest.
- *
- * @returns The TSC offset after applying any nested-guest TSC offset.
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   uTicks      The guest TSC.
- *
- * @remarks This function looks at the VMCB cache rather than directly at the
- *          nested-guest VMCB. The latter may have been modified for executing
- *          using hardware-assisted SVM.
- *
- * @sa      CPUMRemoveNestedGuestTscOffset, HMRemoveSvmNstGstTscOffset.
- */
-VMM_INT_DECL(uint64_t) HMApplySvmNstGstTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
-{
-    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    Assert(pVmcbNstGstCache->fCacheValid);
-    return uTicks + pVmcbNstGstCache->u64TSCOffset;
-}
-
-
-/**
- * Removes the TSC offset of an SVM nested-guest if any and returns the new TSC
- * value for the guest.
- *
- * @returns The TSC offset after removing any nested-guest TSC offset.
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   uTicks      The nested-guest TSC.
- *
- * @remarks This function looks at the VMCB cache rather than directly at the
- *          nested-guest VMCB. The latter may have been modified for executing
- *          using hardware-assisted SVM.
- *
- * @sa      CPUMApplyNestedGuestTscOffset, HMApplySvmNstGstTscOffset.
- */
-VMM_INT_DECL(uint64_t) HMRemoveSvmNstGstTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
-{
-    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    Assert(pVmcbNstGstCache->fCacheValid);
-    return uTicks - pVmcbNstGstCache->u64TSCOffset;
 }
 
     
@@ -354 +306 @@
 
 /**
- * Returns whether HM has cached the nested-guest VMCB.
- *
- * If the VMCB is cached by HM, it means HM may have potentially modified the
- * VMCB for execution using hardware-assisted SVM.
- *
- * @returns true if HM has cached the nested-guest VMCB, false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- */
-VMM_INT_DECL(bool) HMHasGuestSvmVmcbCached(PCVMCPU pVCpu)
-{
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return pVmcbNstGstCache->fCacheValid;
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
- * active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   fIntercept  The SVM control/instruction intercept, see
- *                      SVM_CTRL_INTERCEPT_*.
- */
-VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PCVMCPU pVCpu, uint64_t fIntercept)
-{
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u64InterceptCtrl & fIntercept);
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified CR read intercept active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uCr     The CR register number (0 to 15).
- */
-VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PCVMCPU pVCpu, uint8_t uCr)
-{
-    Assert(uCr < 16);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdCRx & (1 << uCr));
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified CR write intercept active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uCr     The CR register number (0 to 15).
- */
-VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PCVMCPU pVCpu, uint8_t uCr)
-{
-    Assert(uCr < 16);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrCRx & (1 << uCr));
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified DR read intercept active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uDr     The DR register number (0 to 15).
- */
-VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PCVMCPU pVCpu, uint8_t uDr)
-{
-    Assert(uDr < 16);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdDRx & (1 << uDr));
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified DR write intercept active.
- *
- * @returns @c true if in intercept is set, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   uDr     The DR register number (0 to 15).
- */
-VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PCVMCPU pVCpu, uint8_t uDr)
-{
-    Assert(uDr < 16);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrDRx & (1 << uDr));
-}
-
-
-/**
- * Checks if the nested-guest VMCB has the specified exception intercept active.
- *
- * @returns true if in intercept is active, false otherwise.
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   uVector     The exception / interrupt vector.
- */
-VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PCVMCPU pVCpu, uint8_t uVector)
-{
-    Assert(uVector < 32);
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u32InterceptXcpt & (1 << uVector));
+ * Gets the SVM nested-guest control intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling
+ *                          EMT.
+ * @param   pu64Intercepts  Where to store the control intercepts. Only updated when
+ *                          @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmCtrlIntercepts(PCVMCPU pVCpu, uint64_t *pu64Intercepts)
+{
+    Assert(pu64Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu64Intercepts = pVmcbNstGstCache->u64InterceptCtrl;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest CRx-read intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling
+ *                          EMT.
+ * @param   pu16Intercepts  Where to store the CRx-read intercepts. Only updated
+ *                          when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmReadCRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
+{
+    Assert(pu16Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16Intercepts = pVmcbNstGstCache->u16InterceptRdCRx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest CRx-write intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling
+ *                          EMT.
+ * @param   pu16Intercepts  Where to store the CRx-write intercepts. Only updated
+ *                          when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmWriteCRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
+{
+    Assert(pu16Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16Intercepts = pVmcbNstGstCache->u16InterceptWrCRx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest DRx-read intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling
+ *                          EMT.
+ * @param   pu16Intercepts  Where to store the DRx-read intercepts. Only updated
+ *                          when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmReadDRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
+{
+    Assert(pu16Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16Intercepts = pVmcbNstGstCache->u16InterceptRdDRx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest DRx-write intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling
+ *                          EMT.
+ * @param   pu16Intercepts  Where to store the DRx-write intercepts. Only updated
+ *                          when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmWriteDRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
+{
+    Assert(pu16Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16Intercepts = pVmcbNstGstCache->u16InterceptWrDRx;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest exception intercepts if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling
+ *                          EMT.
+ * @param   pu32Intercepts  Where to store the exception intercepts. Only updated
+ *                          when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmXcptIntercepts(PCVMCPU pVCpu, uint32_t *pu32Intercepts)
+{
+    Assert(pu32Intercepts);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu32Intercepts = pVmcbNstGstCache->u32InterceptXcpt;
+        return true;
+    }
+    return false;
 }
 
     
@@ -469 +440 @@
  * Checks if the nested-guest VMCB has virtual-interrupts masking enabled.
  *
- * @returns true if virtual-interrupts are masked, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- */
-VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PCVMCPU pVCpu)
-{
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return pVmcbNstGstCache->fVIntrMasking;
-}
-
-
-/**
- * Checks if the nested-guest VMCB has nested-paging enabled.
- *
- * @returns true if nested-paging is enabled, @c false otherwise.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- */
-VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PCVMCPU pVCpu)
-{
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return pVmcbNstGstCache->fNestedPaging;
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling
+ *                          EMT.
+ * @param   pfVIntrMasking  Where to store the virtual-interrupt masking bit.
+ *                          Updated only when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmVirtIntrMasking(PCVMCPU pVCpu, bool *pfVIntrMasking)
+{
+    Assert(pfVIntrMasking);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pfVIntrMasking = pVmcbNstGstCache->fVIntrMasking;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Gets the SVM nested-guest nested-paging bit if cached by HM.
+ *
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu               The cross context virtual CPU structure of the
+ *                              calling EMT.
+ * @param   pfNestedPagingCtrl  Where to store the nested-paging bit. Updated only
+ *                              when @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmNestedPaging(PCVMCPU pVCpu, bool *pfNestedPaging)
+{
+    Assert(pfNestedPaging);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pfNestedPaging = pVmcbNstGstCache->fNestedPaging;
+        return true;
+    }
+    return false;
 }
 
     
@@ -497 +484 @@
  * Returns the nested-guest VMCB pause-filter count.
  *
- * @returns The pause-filter count.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- */
-VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu)
-{
-    Assert(HMHasGuestSvmVmcbCached(pVCpu));
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return pVmcbNstGstCache->u16PauseFilterCount;
-}
-
+ * @returns @c true on success, @c false otherwise.
+ * @param   pVCpu                   The cross context virtual CPU structure of the
+ *                                  calling EMT.
+ * @param   pu16PauseFilterCount    Where to store the pause-filter count. Only
+ *                                  updated @c true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, uint16_t *pu16PauseFilterCount)
+{
+    Assert(pu16PauseFilterCount);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu16PauseFilterCount = pVmcbNstGstCache->u16PauseFilterCount;
+        return true;
+    }
+    return false;
+}
+
+
+/**
+ * Returns the SVM nested-guest TSC offset if cached by HM.
+ *
+ * @returns The TSC offset after applying any nested-guest TSC offset.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling
+ *                          EMT.
+ * @param   pu64TscOffset   Where to store the TSC offset. Only updated when @c
+ *                          true is returned.
+ */
+VMM_INT_DECL(bool) HMGetGuestSvmTscOffset(PCVMCPU pVCpu, uint64_t *pu64TscOffset)
+{
+    Assert(pu64TscOffset);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    if (pVmcbNstGstCache->fCacheValid)
+    {
+        *pu64TscOffset = pVmcbNstGstCache->u64TSCOffset;
+        return true;
+    }
+    return false;
+}
+
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- r78707
+++ r78869
@@ -2079 +2079 @@
         uint16_t const uGuestPauseFilterCount     = pVM->hm.s.svm.cPauseFilter;
         uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
-        if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
+        if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_PAUSE))
         {
             PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     
@@ -3337 +3337 @@
         /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
         if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
-            uTscOffset = HMApplySvmNstGstTscOffset(pVCpu, uTscOffset);
+            uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
 #endif
 
     
@@ -4706 +4706 @@
         {
             /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMNotifySvmNstGstVmexit(). */
-            uint64_t const uGstTsc = HMRemoveSvmNstGstTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
+            uint64_t const uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
             TMCpuTickSetLastSeen(pVCpu, uGstTsc);
         }
     
@@ -5180 +5180 @@
      */
     PSVMVMCB       pVmcbNstGst     = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
+    PCCPUMCTX      pCtx            = &pVCpu->cpum.GstCtx;
     PSVMVMCBCTRL   pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
     uint64_t const uExitCode       = pVmcbNstGstCtrl->u64ExitCode;
     
@@ -5190 +5191 @@
         case SVM_EXIT_CPUID:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CPUID))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitCpuid(pVCpu, pSvmTransient);
     
@@ -5197 +5198 @@
         case SVM_EXIT_RDTSC:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSC))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitRdtsc(pVCpu, pSvmTransient);
     
@@ -5204 +5205 @@
         case SVM_EXIT_RDTSCP:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSCP))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitRdtscp(pVCpu, pSvmTransient);
     
@@ -5211 +5212 @@
         case SVM_EXIT_MONITOR:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MONITOR))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitMonitor(pVCpu, pSvmTransient);
     
@@ -5218 +5219 @@
         case SVM_EXIT_MWAIT:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MWAIT))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitMwait(pVCpu, pSvmTransient);
     
@@ -5225 +5226 @@
         case SVM_EXIT_HLT:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_HLT))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_HLT))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitHlt(pVCpu, pSvmTransient);
     
@@ -5232 +5233 @@
         case SVM_EXIT_MSR:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
             {
                 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
     
@@ -5269 +5270 @@
         case SVM_EXIT_IOIO:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
             {
                 void *pvIoBitmap = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap);
     
@@ -5290 +5291 @@
 
                 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
-                if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF))
+                if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
                     NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, u32ErrCode, uFaultAddress);
 
     
@@ -5303 +5304 @@
         case SVM_EXIT_XCPT_UD:
         {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_UD))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_UD))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             hmR0SvmSetPendingXcptUD(pVCpu);
     
@@ -5311 +5312 @@
         case SVM_EXIT_XCPT_MF:
         {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_MF))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitXcptMF(pVCpu, pSvmTransient);
     
@@ -5318 +5319 @@
         case SVM_EXIT_XCPT_DB:
         {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_DB))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_DB))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmNestedExitXcptDB(pVCpu, pSvmTransient);
     
@@ -5325 +5326 @@
         case SVM_EXIT_XCPT_AC:
        {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_AC))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_AC))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitXcptAC(pVCpu, pSvmTransient);
     
@@ -5332 +5333 @@
         case SVM_EXIT_XCPT_BP:
         {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_BP))
+            if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_BP))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmNestedExitXcptBP(pVCpu, pSvmTransient);
     
@@ -5342 +5343 @@
         {
             uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
-            if (HMIsGuestSvmReadCRxInterceptSet(pVCpu, uCr))
+            if (CPUMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitReadCRx(pVCpu, pSvmTransient);
     
@@ -5349 +5350 @@
         case SVM_EXIT_CR0_SEL_WRITE:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
     
@@ -5362 +5363 @@
             Log4Func(("Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));
 
-            if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr))
+            if (CPUMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
     
@@ -5369 +5370 @@
         case SVM_EXIT_PAUSE:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitPause(pVCpu, pSvmTransient);
     
@@ -5376 +5377 @@
         case SVM_EXIT_VINTR:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VINTR))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
     
@@ -5402 +5403 @@
         case SVM_EXIT_FERR_FREEZE:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_FERR_FREEZE))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient);
     
@@ -5409 +5410 @@
         case SVM_EXIT_INVLPG:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPG))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitInvlpg(pVCpu, pSvmTransient);
     
@@ -5416 +5417 @@
         case SVM_EXIT_WBINVD:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_WBINVD))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_WBINVD))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitWbinvd(pVCpu, pSvmTransient);
     
@@ -5423 +5424 @@
         case SVM_EXIT_INVD:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVD))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVD))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitInvd(pVCpu, pSvmTransient);
     
@@ -5430 +5431 @@
         case SVM_EXIT_RDPMC:
         {
-            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDPMC))
                 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
             return hmR0SvmExitRdpmc(pVCpu, pSvmTransient);
     
@@ -5445 +5446 @@
                 {
                     uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
-                    if (HMIsGuestSvmReadDRxInterceptSet(pVCpu, uDr))
+                    if (CPUMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
     
@@ -5456 +5457 @@
                 {
                     uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
-                    if (HMIsGuestSvmWriteDRxInterceptSet(pVCpu, uDr))
+                    if (CPUMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitWriteDRx(pVCpu, pSvmTransient);
     
@@ -5486 +5487 @@
                 {
                     uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0;
-                    if (HMIsGuestSvmXcptInterceptSet(pVCpu, uVector))
+                    if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient);
     
@@ -5493 +5494 @@
                 case SVM_EXIT_XSETBV:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_XSETBV))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitXsetbv(pVCpu, pSvmTransient);
     
@@ -5500 +5501 @@
                 case SVM_EXIT_TASK_SWITCH:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_TASK_SWITCH))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient);
     
@@ -5507 +5508 @@
                 case SVM_EXIT_IRET:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IRET))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IRET))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitIret(pVCpu, pSvmTransient);
     
@@ -5514 +5515 @@
                 case SVM_EXIT_SHUTDOWN:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitShutdown(pVCpu, pSvmTransient);
     
@@ -5521 +5522 @@
                 case SVM_EXIT_VMMCALL:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMMCALL))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitVmmCall(pVCpu, pSvmTransient);
     
@@ -5528 +5529 @@
                 case SVM_EXIT_CLGI:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CLGI))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                      return hmR0SvmExitClgi(pVCpu, pSvmTransient);
     
@@ -5535 +5536 @@
                 case SVM_EXIT_STGI:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_STGI))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_STGI))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                      return hmR0SvmExitStgi(pVCpu, pSvmTransient);
     
@@ -5542 +5543 @@
                 case SVM_EXIT_VMLOAD:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMLOAD))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitVmload(pVCpu, pSvmTransient);
     
@@ -5549 +5550 @@
                 case SVM_EXIT_VMSAVE:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMSAVE))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitVmsave(pVCpu, pSvmTransient);
     
@@ -5556 +5557 @@
                 case SVM_EXIT_INVLPGA:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPGA))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitInvlpga(pVCpu, pSvmTransient);
     
@@ -5563 +5564 @@
                 case SVM_EXIT_VMRUN:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     return hmR0SvmExitVmrun(pVCpu, pSvmTransient);
     
@@ -5570 +5571 @@
                 case SVM_EXIT_RSM:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RSM))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RSM))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     hmR0SvmSetPendingXcptUD(pVCpu);
     
@@ -5578 +5579 @@
                 case SVM_EXIT_SKINIT:
                 {
-                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
+                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SKINIT))
                         NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                     hmR0SvmSetPendingXcptUD(pVCpu);
     
@@ -7572 +7573 @@
             /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
             if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
-                && HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF))
+                && CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
                 return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_XCPT_PF, uErrCode, uFaultAddress));
 #endif