VirtualBox

Timestamp:
Mar 21, 2018 9:29:22 AM
Author:
vboxsync
Message:

VMM/HM: Get rid of lazy FPU loading for AMD-V. Nested Hw.virt: Fix FPU-related issues while executing nested-KVM DSL guests.

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
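The heart of the change shows up in the run-loop hunks further down: rather than keeping CR0.TS set and trapping #NM to load the guest FPU state on first use, the guest FPU state is now loaded unconditionally before every VMRUN. The standalone sketch below contrasts the two strategies; it is illustrative only (all names are made up), not VirtualBox code.

    /* Illustrative sketch only -- not VirtualBox code. Contrasts lazy (#NM-driven)
     * FPU loading with the eager pre-load this changeset switches AMD-V to. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool g_fGuestFpuLoaded = false;

    static void LoadGuestFpu(void) { g_fGuestFpuLoaded = true; puts("guest FPU state loaded"); }

    /* Old approach (sketch): keep CR0.TS set, take a #NM VM-exit on first FPU use,
     * load the guest FPU there, then relax the CR0/exception intercepts. */
    static void OnXcptNmExit(void)
    {
        if (!g_fGuestFpuLoaded)
            LoadGuestFpu();             /* lazy: loaded only when the guest touches the FPU */
    }

    /* New approach (sketch): load the guest FPU unconditionally before every VMRUN,
     * so no #NM intercept and no CR0.TS/MP games are needed for FPU switching. */
    static void BeforeVmRun(void)
    {
        if (!g_fGuestFpuLoaded)
            LoadGuestFpu();             /* eager: always active while executing guest code */
    }

    int main(void)
    {
        BeforeVmRun();                  /* eager path taken on every entry */
        OnXcptNmExit();                 /* becomes a no-op / is removed entirely */
        return 0;
    }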

    Diff r71383 → r71415 ('-' = removed, '+' = added):
    @@ -180 +180 @@
      * while executing the guest or nested-guest.
      */
    -#define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS           (  SVM_CTRL_INTERCEPT_INTR        \
    -                                                         | SVM_CTRL_INTERCEPT_NMI         \
    -                                                         | SVM_CTRL_INTERCEPT_INIT        \
    -                                                         | SVM_CTRL_INTERCEPT_RDPMC       \
    -                                                         | SVM_CTRL_INTERCEPT_CPUID       \
    -                                                         | SVM_CTRL_INTERCEPT_RSM         \
    -                                                         | SVM_CTRL_INTERCEPT_HLT         \
    -                                                         | SVM_CTRL_INTERCEPT_IOIO_PROT   \
    -                                                         | SVM_CTRL_INTERCEPT_MSR_PROT    \
    -                                                         | SVM_CTRL_INTERCEPT_INVLPGA     \
    -                                                         | SVM_CTRL_INTERCEPT_SHUTDOWN    \
    -                                                         | SVM_CTRL_INTERCEPT_FERR_FREEZE \
    -                                                         | SVM_CTRL_INTERCEPT_VMRUN       \
    -                                                         | SVM_CTRL_INTERCEPT_SKINIT      \
    -                                                         | SVM_CTRL_INTERCEPT_WBINVD      \
    -                                                         | SVM_CTRL_INTERCEPT_MONITOR     \
    -                                                         | SVM_CTRL_INTERCEPT_MWAIT       \
    +#define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS           (  SVM_CTRL_INTERCEPT_INTR          \
    +                                                         | SVM_CTRL_INTERCEPT_NMI           \
    +                                                         | SVM_CTRL_INTERCEPT_INIT          \
    +                                                         | SVM_CTRL_INTERCEPT_RDPMC         \
    +                                                         | SVM_CTRL_INTERCEPT_CPUID         \
    +                                                         | SVM_CTRL_INTERCEPT_RSM           \
    +                                                         | SVM_CTRL_INTERCEPT_HLT           \
    +                                                         | SVM_CTRL_INTERCEPT_IOIO_PROT     \
    +                                                         | SVM_CTRL_INTERCEPT_MSR_PROT      \
    +                                                         | SVM_CTRL_INTERCEPT_INVLPGA       \
    +                                                         | SVM_CTRL_INTERCEPT_SHUTDOWN      \
    +                                                         | SVM_CTRL_INTERCEPT_FERR_FREEZE   \
    +                                                         | SVM_CTRL_INTERCEPT_VMRUN         \
    +                                                         | SVM_CTRL_INTERCEPT_SKINIT        \
    +                                                         | SVM_CTRL_INTERCEPT_WBINVD        \
    +                                                         | SVM_CTRL_INTERCEPT_MONITOR       \
    +                                                         | SVM_CTRL_INTERCEPT_MWAIT         \
    +                                                         | SVM_CTRL_INTERCEPT_CR0_SEL_WRITE \
                                                              | SVM_CTRL_INTERCEPT_XSETBV)

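The macro now also forces the selective CR0 write intercept. As the comments later in this changeset explain, that intercept still fires when bits other than CR0.TS and CR0.MP change, which is what allows HM to drop the full CR0 write intercept once the shadow CR0 matches the guest value. A rough sketch of that predicate follows (hand-written constants, illustrative only, not VirtualBox code):

    /* Illustrative sketch: when would a MOV-to-CR0 trigger a selective CR0 write
     * intercept?  Roughly: whenever any bit other than TS (bit 3) or MP (bit 1)
     * changes.  Not VirtualBox code. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CR0_MP  (UINT64_C(1) << 1)
    #define CR0_TS  (UINT64_C(1) << 3)
    #define CR0_PG  (UINT64_C(1) << 31)

    static bool WouldCr0SelWriteExit(uint64_t uOldCr0, uint64_t uNewCr0)
    {
        return ((uOldCr0 ^ uNewCr0) & ~(CR0_TS | CR0_MP)) != 0;
    }

    int main(void)
    {
        /* Toggling only TS: no selective-write exit.  Changing PG: exit. */
        printf("%d %d\n", WouldCr0SelWriteExit(0x11, 0x11 | CR0_TS),
                          WouldCr0SelWriteExit(0x11, 0x11 | CR0_PG));
        return 0;
    }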
     
    @@ -360 +361 @@
     static FNSVMEXITHANDLER hmR0SvmExitIret;
     static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
    -static FNSVMEXITHANDLER hmR0SvmExitXcptNM;
     static FNSVMEXITHANDLER hmR0SvmExitXcptUD;
     static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
     
    @@ -951 +951 @@
                                            | SVM_CTRL_INTERCEPT_VMMCALL;

    -        /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
    +        /*
    +         * CR0, CR4 reads/writes must be intercepted, as our shadow values may differ from the guest's.
    +         * These interceptions might be relaxed later during VM execution if the conditions allow.
    +         */
             pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
    -
    -        /* CR0, CR4 writes must be intercepted for the same reasons as above. */
             pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);

     
    @@ -1426 +1427 @@
     static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     {
    -    uint64_t u64GuestCR0 = pCtx->cr0;
    +    /* The guest FPU is now always pre-loaded before executing guest code, see @bugref{7243#c101}. */
    +    Assert(CPUMIsGuestFPUStateActive(pVCpu));
    +
    +    uint64_t const uGuestCr0  = pCtx->cr0;
    +    uint64_t       uShadowCr0 = uGuestCr0;

         /* Always enable caching. */
    -    u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
    +    uShadowCr0 &= ~(X86_CR0_CD | X86_CR0_NW);
    +
    +    /* When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()). */
    +    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    +    {
    +        uShadowCr0 |= X86_CR0_PG      /* Use shadow page tables. */
    +                   |  X86_CR0_WP;     /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
    +    }

         /*
    -     * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
    +     * If the shadow and guest CR0 are identical we can avoid intercepting CR0 reads.
    +     *
    +     * CR0 writes still needs interception as PGM requires tracking paging mode changes, see @bugref{6944}.
    +     * We also don't ever want to honor weird things like cache disable from the guest. However, we can
    +     * avoid intercepting changes to the TS & MP bits by clearing the CR0 write intercept below and keeping
    +     * SVM_CTRL_INTERCEPT_CR0_SEL_WRITE instead.
         */
    -    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    -    {
    -        u64GuestCR0 |= X86_CR0_PG      /* When Nested Paging is not available, use shadow page tables. */
    -                    |  X86_CR0_WP;     /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
    -    }
    -
    -    /*
    -     * Guest FPU bits.
    -     */
    -    bool fInterceptNM = false;
    -    bool fInterceptMF = false;
    -    u64GuestCR0 |= X86_CR0_NE;         /* Use internal x87 FPU exceptions handling rather than external interrupts. */
    -    if (CPUMIsGuestFPUStateActive(pVCpu))
    -    {
    -        /* Catch floating point exceptions if we need to report them to the guest in a different way. */
    -        if (!(pCtx->cr0 & X86_CR0_NE))
    -        {
    -            Log4(("hmR0SvmLoadSharedCR0: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
    -            fInterceptMF = true;
    +    if (uShadowCr0 == uGuestCr0)
    +    {
    +        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    +        {
    +            pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(0);
    +            pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(0);
    +            Assert(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_CR0_SEL_WRITE);
    +        }
    +        else
    +        {
    +            /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
    +            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    +            Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
    +            pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx       & ~RT_BIT(0))
    +                                          | (pVmcbNstGstCache->u16InterceptRdCRx &  RT_BIT(0));
    +            pVmcb->ctrl.u16InterceptWrCRx = (pVmcb->ctrl.u16InterceptWrCRx       & ~RT_BIT(0))
    +                                          | (pVmcbNstGstCache->u16InterceptWrCRx &  RT_BIT(0));
             }
         }
         else
         {
    -        fInterceptNM = true;           /* Guest FPU inactive, #VMEXIT on #NM for lazy FPU loading. */
    -        u64GuestCR0 |= X86_CR0_TS      /* Guest can task switch quickly and do lazy FPU syncing. */
    -                    |  X86_CR0_MP;     /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
    -    }
    -
    -    /*
    -     * Update the exception intercept bitmap.
    -     */
    -    if (fInterceptNM)
    -        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
    -    else
    -        hmR0SvmRemoveXcptIntercept(pVCpu, pCtx, pVmcb, X86_XCPT_NM);
    -
    -    if (fInterceptMF)
    -        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
    -    else
    -        hmR0SvmRemoveXcptIntercept(pVCpu, pCtx, pVmcb, X86_XCPT_MF);
    -
    -    pVmcb->guest.u64CR0 = u64GuestCR0;
    -    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    +        pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(0);
    +        pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(0);
    +    }
    +    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +
    +    Assert(RT_HI_U32(uShadowCr0) == 0);
    +    if (pVmcb->guest.u64CR0 != uShadowCr0)
    +    {
    +        pVmcb->guest.u64CR0 = uShadowCr0;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    +    }
     }

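Condensed, the reworked hmR0SvmLoadSharedCR0 derives a shadow CR0 from the guest CR0 and keeps the CR0 read/write intercepts only while the two differ. A standalone sketch of that derivation (illustrative names and a made-up sample value, not VirtualBox code):

    /* Illustrative sketch of the shadow-CR0 derivation done above (not VirtualBox code). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CR0_WP  (UINT64_C(1) << 16)
    #define CR0_NW  (UINT64_C(1) << 29)
    #define CR0_CD  (UINT64_C(1) << 30)
    #define CR0_PG  (UINT64_C(1) << 31)

    static uint64_t ComputeShadowCr0(uint64_t uGuestCr0, bool fNestedPaging)
    {
        uint64_t uShadowCr0 = uGuestCr0;
        uShadowCr0 &= ~(CR0_CD | CR0_NW);       /* never honor cache-disable from the guest */
        if (!fNestedPaging)
            uShadowCr0 |= CR0_PG | CR0_WP;      /* shadow paging needs PG and WP forced on */
        return uShadowCr0;
    }

    int main(void)
    {
        uint64_t uGuestCr0 = UINT64_C(0x80010031);  /* PG|WP|NE|ET|PE, as an example */
        uint64_t uShadow   = ComputeShadowCr0(uGuestCr0, true /* nested paging */);
        /* When shadow == guest, the CR0 read intercept and the full write intercept
         * can be dropped; only the selective CR0 write intercept remains. */
        printf("intercept CR0 accesses: %s\n", uShadow != uGuestCr0 ? "yes" : "no");
        return 0;
    }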
     
    @@ -2046 +2052 @@
              * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
              *   the nested-guest, the physical CPU raises a \#UD exception as expected.
    +         *
    +         * - SVM_CTRL_INTERCEPT_CR0_SEL_WRITE: Is always required as we want to track PGM mode
    +         *   changes and not honor cache disable changes even by the nested-guest.
              */
             pVmcbNstGst->ctrl.u64InterceptCtrl  |= (pVmcb->ctrl.u64InterceptCtrl & ~(  SVM_CTRL_INTERCEPT_VINTR
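When merging intercepts for a nested guest, the outer (host HM) intercepts are OR'ed into the nested-guest VMCB minus a small exclusion set, and SVM_CTRL_INTERCEPT_CR0_SEL_WRITE is deliberately never excluded. A rough illustration of that kind of merge (bit values and the exclusion set are made up for the example, not the real SVM encodings):

    /* Illustrative sketch of merging intercept masks for a nested guest. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define INTERCEPT_VINTR          (UINT64_C(1) << 0)
    #define INTERCEPT_VMMCALL        (UINT64_C(1) << 1)
    #define INTERCEPT_CR0_SEL_WRITE  (UINT64_C(1) << 2)

    int main(void)
    {
        uint64_t uOuter  = INTERCEPT_VINTR | INTERCEPT_VMMCALL | INTERCEPT_CR0_SEL_WRITE;
        uint64_t uNested = INTERCEPT_VMMCALL;

        /* Outer intercepts merged in, except the excluded ones; CR0_SEL_WRITE is
         * kept so PGM can still track paging-mode changes under the nested guest. */
        uint64_t uMerged = uNested | (uOuter & ~(INTERCEPT_VINTR | INTERCEPT_VMMCALL));
        printf("merged = %#" PRIx64 "\n", uMerged);
        return 0;
    }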
     
    @@ -2318 +2327 @@
         PCSVMVMCBCTRL       pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
         PCSVMVMCBSTATESAVE  pVmcbNstGstState = &pVmcbNstGst->guest;
    -    PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    +    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;

         /*
     
    @@ -2330 +2339 @@
         if (!fWasCached)
         {
    -        pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
    -        pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
    -        pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
    -        pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
    -        pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
    -        pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
    -        pNstGstVmcbCache->u64CR0            = pVmcbNstGstState->u64CR0;
    -        pNstGstVmcbCache->u64CR3            = pVmcbNstGstState->u64CR3;
    -        pNstGstVmcbCache->u64CR4            = pVmcbNstGstState->u64CR4;
    -        pNstGstVmcbCache->u64EFER           = pVmcbNstGstState->u64EFER;
    -        pNstGstVmcbCache->u64DBGCTL         = pVmcbNstGstState->u64DBGCTL;
    -        pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
    -        pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
    -        pNstGstVmcbCache->u64TSCOffset      = pVmcbNstGstCtrl->u64TSCOffset;
    -        pNstGstVmcbCache->u32VmcbCleanBits  = pVmcbNstGstCtrl->u32VmcbCleanBits;
    -        pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
    -        pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
    -        pNstGstVmcbCache->u1NestedPaging    = pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging;
    -        pNstGstVmcbCache->u1LbrVirt         = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
    +        pVmcbNstGstCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
    +        pVmcbNstGstCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
    +        pVmcbNstGstCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
    +        pVmcbNstGstCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
    +        pVmcbNstGstCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
    +        pVmcbNstGstCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
    +        pVmcbNstGstCache->u64CR0            = pVmcbNstGstState->u64CR0;
    +        pVmcbNstGstCache->u64CR3            = pVmcbNstGstState->u64CR3;
    +        pVmcbNstGstCache->u64CR4            = pVmcbNstGstState->u64CR4;
    +        pVmcbNstGstCache->u64EFER           = pVmcbNstGstState->u64EFER;
    +        pVmcbNstGstCache->u64DBGCTL         = pVmcbNstGstState->u64DBGCTL;
    +        pVmcbNstGstCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
    +        pVmcbNstGstCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
    +        pVmcbNstGstCache->u64TSCOffset      = pVmcbNstGstCtrl->u64TSCOffset;
    +        pVmcbNstGstCache->u32VmcbCleanBits  = pVmcbNstGstCtrl->u32VmcbCleanBits;
    +        pVmcbNstGstCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
    +        pVmcbNstGstCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
    +        pVmcbNstGstCache->u1NestedPaging    = pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging;
    +        pVmcbNstGstCache->u1LbrVirt         = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
             pCtx->hwvirt.svm.fHMCachedVmcb      = true;
             Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
     
    @@ -2569 +2578 @@

         /*
    -     * Guest Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
    +     * Guest Control registers: CR0, CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
          */
         pMixedCtx->cr2        = pVmcb->guest.u64CR2;
    +
    +    /* If we're not intercepting changes to CR0 TS & MP bits, sync those bits here. */
    +    if (!(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(0)))
    +    {
    +        pMixedCtx->cr0 = (pMixedCtx->cr0      & ~(X86_CR0_TS | X86_CR0_MP))
    +                       | (pVmcb->guest.u64CR0 &  (X86_CR0_TS | X86_CR0_MP));
    +    }

         /*
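Since plain CR0 writes may no longer be intercepted, the guest's latest TS and MP values live only in the VMCB and have to be folded back into the CR0 value VirtualBox tracks, as the hunk above does on #VMEXIT. A small worked example of that bit merge (made-up values, not VirtualBox code):

    /* Illustrative worked example of merging the VMCB's CR0.TS/MP back into the
     * tracked CR0 value. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CR0_MP  (UINT64_C(1) << 1)
    #define CR0_TS  (UINT64_C(1) << 3)

    int main(void)
    {
        uint64_t uTrackedCr0 = UINT64_C(0x80010031);            /* TS and MP clear */
        uint64_t uVmcbCr0    = UINT64_C(0x80010031) | CR0_TS;   /* guest set TS without an exit */

        uint64_t uMerged = (uTrackedCr0 & ~(CR0_TS | CR0_MP))
                         | (uVmcbCr0    &  (CR0_TS | CR0_MP));
        printf("merged CR0 = %#" PRIx64 "\n", uMerged);         /* picks up TS from the VMCB */
        return 0;
    }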
     
    @@ -4247 +4263 @@
         hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcbNstGst);

    -    if (   pVCpu->hm.s.fPreloadGuestFpu
    -        && !CPUMIsGuestFPUStateActive(pVCpu))
    -    {
    +    if (!CPUMIsGuestFPUStateActive(pVCpu))
    +    {
    +        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
             CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
    +        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
    +        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
         }
     
    @@ -4359 +4377 @@
         hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);

    -    if (   pVCpu->hm.s.fPreloadGuestFpu
    -        && !CPUMIsGuestFPUStateActive(pVCpu))
    -    {
    +    if (!CPUMIsGuestFPUStateActive(pVCpu))
    +    {
    +        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
             CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
    +        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
    +        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
         }
     
    @@ -5134 +5154 @@
             }

    -        case SVM_EXIT_EXCEPTION_7:   /* X86_XCPT_NM */
    -        {
    -            if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_NM))
    -                return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    -            hmR0SvmSetPendingXcptNM(pVCpu);
    -            return VINF_SUCCESS;
    -        }
    -
             case SVM_EXIT_EXCEPTION_6:   /* X86_XCPT_UD */
             {
     
    @@ -5154 +5166 @@
                 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
                     return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    -            hmR0SvmSetPendingXcptMF(pVCpu);
    -            return VINF_SUCCESS;
    +            return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
             }

     
    @@ -5189 +5200 @@
             }

    +        case SVM_EXIT_CR0_SEL_WRITE:
    +        {
    +            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
    +                return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    +            return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
    +        }
    +
             case SVM_EXIT_WRITE_CR0:
             case SVM_EXIT_WRITE_CR3:
             case SVM_EXIT_WRITE_CR4:
    -        case SVM_EXIT_WRITE_CR8:   /** @todo Shouldn't writes to CR8 go to V_TPR instead since we run with V_INTR_MASKING set?? */
    +        case SVM_EXIT_WRITE_CR8:   /** @todo Shouldn't writes to CR8 go to V_TPR instead since we run with V_INTR_MASKING set? */
             {
                 uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
     
    @@ -5294 +5312 @@
                     case SVM_EXIT_EXCEPTION_0:     /*case SVM_EXIT_EXCEPTION_1:*/   case SVM_EXIT_EXCEPTION_2:
                     /*case SVM_EXIT_EXCEPTION_3:*/   case SVM_EXIT_EXCEPTION_4:     case SVM_EXIT_EXCEPTION_5:
    -                /*case SVM_EXIT_EXCEPTION_6:*/ /*case SVM_EXIT_EXCEPTION_7:*/   case SVM_EXIT_EXCEPTION_8:
    +                /*case SVM_EXIT_EXCEPTION_6:*/   case SVM_EXIT_EXCEPTION_7:     case SVM_EXIT_EXCEPTION_8:
                     case SVM_EXIT_EXCEPTION_9:       case SVM_EXIT_EXCEPTION_10:    case SVM_EXIT_EXCEPTION_11:
                     case SVM_EXIT_EXCEPTION_12:      case SVM_EXIT_EXCEPTION_13:  /*case SVM_EXIT_EXCEPTION_14:*/
     
    @@ -5476 +5494 @@
                 return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient);

    -        case SVM_EXIT_EXCEPTION_7:   /* X86_XCPT_NM */
    -            return hmR0SvmExitXcptNM(pVCpu, pCtx, pSvmTransient);
    -
             case SVM_EXIT_EXCEPTION_6:   /* X86_XCPT_UD */
                 return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
     
    @@ -5508 +5523 @@
                 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);

    +        case SVM_EXIT_CR0_SEL_WRITE:
             case SVM_EXIT_WRITE_CR0:
             case SVM_EXIT_WRITE_CR3:
     
    @@ -5513 +5529 @@
             case SVM_EXIT_WRITE_CR8:
             {
    -            uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
    +            uint8_t const uCr = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : uExitCode - SVM_EXIT_WRITE_CR0;
                 Log4(("hmR0SvmHandleExit: Write CR%u\n", uCr)); NOREF(uCr);
                 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
     
    @@ -5615 +5631 @@
                     case SVM_EXIT_EXCEPTION_5:             /* X86_XCPT_BR */
                     /*   SVM_EXIT_EXCEPTION_6: */          /* X86_XCPT_UD - Handled above. */
    -                /*   SVM_EXIT_EXCEPTION_7: */          /* X86_XCPT_NM - Handled above. */
    +                case SVM_EXIT_EXCEPTION_7:             /* X86_XCPT_NM */
                     case SVM_EXIT_EXCEPTION_8:             /* X86_XCPT_DF */
                     case SVM_EXIT_EXCEPTION_9:             /* X86_XCPT_CO_SEG_OVERRUN */
     
    @@ -6521 +6537 @@
         HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();

    -    uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0;
    +    uint64_t const uExitCode = pSvmTransient->u64ExitCode;
    +    uint8_t  const iCrReg    = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0);
         Assert(iCrReg <= 15);

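Both exit dispatchers and the write-CRx exit handler now map SVM_EXIT_CR0_SEL_WRITE onto CR0 before computing the CR index from the exit code. A quick worked example of that mapping (the numeric exit codes below are placeholders for the example, not necessarily the real SVM encodings):

    /* Illustrative sketch of mapping a write-CRx exit code to a CR index, with
     * CR0_SEL_WRITE special-cased to CR0.  Not VirtualBox code. */
    #include <stdint.h>
    #include <stdio.h>

    enum { EXIT_WRITE_CR0 = 0x10, EXIT_WRITE_CR3 = 0x13, EXIT_WRITE_CR8 = 0x18, EXIT_CR0_SEL_WRITE = 0x65 };

    static uint8_t CrIndexFromExitCode(uint64_t uExitCode)
    {
        return uExitCode == EXIT_CR0_SEL_WRITE ? 0 : (uint8_t)(uExitCode - EXIT_WRITE_CR0);
    }

    int main(void)
    {
        printf("CR0_SEL_WRITE -> CR%u\n", (unsigned)CrIndexFromExitCode(EXIT_CR0_SEL_WRITE)); /* CR0 */
        printf("WRITE_CR3     -> CR%u\n", (unsigned)CrIndexFromExitCode(EXIT_WRITE_CR3));     /* CR3 */
        printf("WRITE_CR8     -> CR%u\n", (unsigned)CrIndexFromExitCode(EXIT_WRITE_CR8));     /* CR8 */
        return 0;
    }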
     
    @@ -7375 +7392 @@

     /**
    - * \#VMEXIT handler for device-not-available exceptions (SVM_EXIT_EXCEPTION_7).
    - * Conditional \#VMEXIT.
    - */
    -HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    -{
    -    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    -
    -    /* Paranoia; Ensure we cannot be called as a result of event delivery. */
    -    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    -    Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
    -
    -    /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
    -    VMMRZCallRing3Disable(pVCpu);
    -    HM_DISABLE_PREEMPT();
    -
    -    int rc;
    -    /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
    -    if (pSvmTransient->fWasGuestFPUStateActive)
    -    {
    -        rc = VINF_EM_RAW_GUEST_TRAP;
    -        Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
    -    }
    -    else
    -    {
    -#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    -        Assert(!pSvmTransient->fWasGuestFPUStateActive);
    -#endif
    -        rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu); /* (No need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
    -        Assert(   rc == VINF_EM_RAW_GUEST_TRAP
    -               || ((rc == VINF_SUCCESS || rc == VINF_CPUM_HOST_CR0_MODIFIED) && CPUMIsGuestFPUStateActive(pVCpu)));
    -    }
    -
    -    HM_RESTORE_PREEMPT();
    -    VMMRZCallRing3Enable(pVCpu);
    -
    -    if (rc == VINF_SUCCESS || rc == VINF_CPUM_HOST_CR0_MODIFIED)
    -    {
    -        /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
    -        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    -        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    -        pVCpu->hm.s.fPreloadGuestFpu = true;
    -    }
    -    else
    -    {
    -        /* Forward #NM to the guest. */
    -        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
    -        hmR0SvmSetPendingXcptNM(pVCpu);
    -        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
    -    }
    -    return VINF_SUCCESS;
    -}
    -
    -
    -/**
      * \#VMEXIT handler for undefined opcode (SVM_EXIT_EXCEPTION_6).
      * Conditional \#VMEXIT.
     
    @@ -7481 +7444 @@

         /* Paranoia; Ensure we cannot be called as a result of event delivery. */
    -    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    +    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);

     
    @@ -7494 +7457 @@
             if (RT_SUCCESS(rc))
             {
    +#ifdef VBOX_WITH_NESTED_HWVIRT
    +            if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
    +                && HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
    +            {
    +                return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_FERR_FREEZE, 0, 0));
    +            }
    +#endif
                 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
    -            /** @todo FERR intercept when in nested-guest mode?   */
    -            rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
    +            rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
                 if (RT_SUCCESS(rc))
                     pCtx->rip += cbOp;
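The #MF path above now reflects the exception to a nested hypervisor as an SVM_EXIT_FERR_FREEZE #VMEXIT when that intercept is set, and otherwise keeps the legacy FERR behaviour of raising IRQ 13. A rough sketch of the routing decision (illustrative types and names, not VirtualBox code):

    /* Illustrative sketch of the #MF routing decision above. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { ROUTE_NESTED_FERR_FREEZE_VMEXIT, ROUTE_LEGACY_IRQ13 } MFROUTE;

    static MFROUTE RouteXcptMF(bool fInNestedGuestMode, bool fNestedInterceptsFerrFreeze)
    {
        if (fInNestedGuestMode && fNestedInterceptsFerrFreeze)
            return ROUTE_NESTED_FERR_FREEZE_VMEXIT;   /* reflect to the nested hypervisor */
        return ROUTE_LEGACY_IRQ13;                    /* FERR -> IRQ 13, see @bugref{6117} */
    }

    int main(void)
    {
        printf("%d\n", RouteXcptMF(true,  true));     /* nested hypervisor gets the #VMEXIT */
        printf("%d\n", RouteXcptMF(false, false));    /* plain guest: legacy IRQ 13 path */
        return 0;
    }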