VirtualBox

Changeset 70258 in vbox for trunk/src


Timestamp: Dec 21, 2017 6:52:11 AM (7 years ago)
Author:    vboxsync
Message:

VMM/HM: Access SVM VMCB clean bits as a 32-bit field.

Location:  trunk/src/VBox/VMM
Files:     3 edited
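
The AMD APM defines the VMCB Clean field as a 32-bit field at VMCB offset 0C0h, with the adjacent upper bytes reserved, which is why this changeset narrows the accessor from 64 to 32 bits. Below is a minimal sketch in C of the idiom the diff touches; the mask names and values are hypothetical stand-ins for the HMSVM_VMCB_CLEAN_* defines, assuming the APM's clean-bit layout, and are not VirtualBox source:

    /* Sketch only: mask values assume the AMD APM's VMCB Clean bit layout. */
    #include <stdint.h>

    #define VMCB_CLEAN_INTERCEPTS  UINT32_C(0x00000001)  /* Bit 0: intercepts, TSC offset, pause filter. */
    #define VMCB_CLEAN_IOPM_MSRPM  UINT32_C(0x00000002)  /* Bit 1: IOPM/MSRPM base addresses. */
    #define VMCB_CLEAN_DRX         UINT32_C(0x00000040)  /* Bit 6: DR6/DR7. */

    typedef struct VMCBCTRL
    {
        uint32_t u32VmcbCleanBits;  /* 32-bit VMCB Clean field; previously accessed as uint64_t. */
        /* ... remaining control fields elided ... */
    } VMCBCTRL;

    /* Clearing a clean bit tells the CPU that the corresponding VMCB state
       was modified and must be re-fetched from memory on the next VMRUN. */
    static void vmcbDirtyDebugRegs(VMCBCTRL *pCtrl)
    {
        pCtrl->u32VmcbCleanBits &= ~VMCB_CLEAN_DRX;
    }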

  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r70178 → r70258

    @@ -363,5 +363,5 @@
             pVmcbNstGstState->u64CR4                  = pNstGstVmcbCache->u64CR4;
             pVmcbNstGstState->u64EFER                 = pNstGstVmcbCache->u64EFER;
    -        pVmcbNstGstCtrl->u64VmcbCleanBits         = pNstGstVmcbCache->u64VmcbCleanBits;
    +        pVmcbNstGstCtrl->u32VmcbCleanBits         = pNstGstVmcbCache->u32VmcbCleanBits;
             pVmcbNstGstCtrl->u64IOPMPhysAddr          = pNstGstVmcbCache->u64IOPMPhysAddr;
             pVmcbNstGstCtrl->u64MSRPMPhysAddr         = pNstGstVmcbCache->u64MSRPMPhysAddr;
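
This hunk is the #VMEXIT side of the nested-guest VMCB cache: fields the VMM changed while running the nested guest are restored from the cache, the clean-bits member now as a 32-bit value. A rough sketch of the save/restore pairing, with hypothetical names condensing the real cache in VirtualBox's HM internals:

    #include <stdint.h>

    /* Hypothetical condensation of the nested-guest VMCB cache. */
    typedef struct NSTGSTVMCBCACHE
    {
        uint64_t u64CR4;
        uint64_t u64EFER;
        uint32_t u32VmcbCleanBits;  /* Matches the narrowed VMCB field. */
    } NSTGSTVMCBCACHE;

    /* Save before the VMM starts editing the nested guest's VMCB ... */
    static void nstGstVmcbCacheSave(NSTGSTVMCBCACHE *pCache, uint64_t u64CR4,
                                    uint64_t u64EFER, uint32_t u32CleanBits)
    {
        pCache->u64CR4           = u64CR4;
        pCache->u64EFER          = u64EFER;
        pCache->u32VmcbCleanBits = u32CleanBits;
    }

    /* ... and restore on #VMEXIT so the guest hypervisor sees its own values. */
    static void nstGstVmcbCacheRestore(NSTGSTVMCBCACHE const *pCache, uint64_t *pu64CR4,
                                       uint64_t *pu64EFER, uint32_t *pu32CleanBits)
    {
        *pu64CR4       = pCache->u64CR4;
        *pu64EFER      = pCache->u64EFER;
        *pu32CleanBits = pCache->u32VmcbCleanBits;
    }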
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r70254 → r70258

    @@ -777,5 +777,5 @@
             ASMBitClear(pbMsrBitmap, uMsrpmBit + 1);

    -    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
    +    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
     }

    @@ -862,5 +862,5 @@

             /* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from the VMCB in memory. */
    -        pVmcb->ctrl.u64VmcbCleanBits = 0;
    +        pVmcb->ctrl.u32VmcbCleanBits = 0;

             /* The host ASID MBZ, for the guest start with 1. */
    @@ -1049,5 +1049,5 @@

             /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
    -        pVmcb->ctrl.u64VmcbCleanBits    &= ~HMSVM_VMCB_CLEAN_NP;
    +        pVmcb->ctrl.u32VmcbCleanBits    &= ~HMSVM_VMCB_CLEAN_NP;

             /* Keep track of last CPU ID even when flushing all the time. */
    @@ -1061,5 +1061,5 @@
             {
                 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
    -            pVmcb->ctrl.u64VmcbCleanBits    &= ~HMSVM_VMCB_CLEAN_NP;
    +            pVmcb->ctrl.u32VmcbCleanBits    &= ~HMSVM_VMCB_CLEAN_NP;

                 if (fNewAsid)
    @@ -1102,5 +1102,5 @@
         {
             pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
         }

    @@ -1223,5 +1223,5 @@
         {
             pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(u32Xcpt);
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
         }
     }
    @@ -1263,5 +1263,5 @@
             {
                 pVmcb->ctrl.u32InterceptXcpt &= ~RT_BIT(u32Xcpt);
    -            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
             }
         }
    @@ -1338,5 +1338,5 @@

         pVmcb->guest.u64CR0 = u64GuestCR0;
    -    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    +    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
     }

    @@ -1362,5 +1362,5 @@
         {
             pVmcb->guest.u64CR2 = pCtx->cr2;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
             HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
         }
    @@ -1382,5 +1382,5 @@

                 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
    -            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
    +            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
                 Assert(pVmcb->ctrl.u64NestedPagingCR3);
                 pVmcb->guest.u64CR3 = pCtx->cr3;
    @@ -1392,5 +1392,5 @@
             }

    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
             HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
         }
    @@ -1439,5 +1439,5 @@

             pVmcb->guest.u64CR4 = u64GuestCR4;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;

             /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
    @@ -1474,5 +1474,5 @@

             pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
             HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
         }
    @@ -1497,5 +1497,5 @@
             pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
             pVmcb->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
             HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
         }
    @@ -1506,5 +1506,5 @@
             pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
             pVmcb->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
             HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
         }
    @@ -1536,5 +1536,5 @@
         {
             pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
             HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
         }
    @@ -1552,5 +1552,5 @@
             {
                 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
    -            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    +            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
             }
         }
    @@ -1629,5 +1629,5 @@
                 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
                 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
    -            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
    +            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
                 pVCpu->hm.s.fUsingHyperDR7 = true;
             }
    @@ -1648,5 +1648,5 @@
                 pVmcb->guest.u64DR7 = pCtx->dr[7];
                 pVmcb->guest.u64DR6 = pCtx->dr[6];
    -            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
    +            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
                 pVCpu->hm.s.fUsingHyperDR7 = false;
             }
    @@ -1705,5 +1705,5 @@
                 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
                 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
    -            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
             }
         }
    @@ -1715,5 +1715,5 @@
                 pVmcb->ctrl.u16InterceptRdDRx = 0;
                 pVmcb->ctrl.u16InterceptWrDRx = 0;
    -            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
             }
         }
    @@ -1736,5 +1736,5 @@
             pVmcbNstGst->ctrl.IntCtrl.n.u1VIntrMasking = 1;
             pVCpu->hm.s.svm.fSyncVTpr = false;
    -        pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_TPR;
    +        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_TPR;

             HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    @@ -1800,5 +1800,5 @@
                 }

    -            pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    +            pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
             }
         }
    @@ -1897,5 +1897,5 @@

             /* Finally, update the VMCB clean bits. */
    -        pVmcbNstGst->ctrl.u64VmcbCleanBits  &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +        pVmcbNstGst->ctrl.u32VmcbCleanBits  &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

             Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS));
    @@ -2157,5 +2157,5 @@
             pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
             pNstGstVmcbCache->u64TSCOffset      = pVmcbNstGstCtrl->u64TSCOffset;
    -        pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
    +        pNstGstVmcbCache->u32VmcbCleanBits  = pVmcbNstGstCtrl->u32VmcbCleanBits;
             pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
             pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
    @@ -2760,5 +2760,5 @@

         /* Finally update the VMCB clean bits since we touched the intercepts as well as the TSC offset. */
    -    pVmcbNstGstCtrl->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +    pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

         if (fParavirtTsc)
    @@ -2798,5 +2798,5 @@
             STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
         }
    -    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

         /** @todo later optimize this to be done elsewhere and not before every
    @@ -3146,5 +3146,5 @@
             pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
             pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
             Log4(("Set VINTR intercept\n"));
         }
    @@ -3166,5 +3166,5 @@
             pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;
             pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
             Log4(("Cleared VINTR intercept\n"));
         }
    @@ -3184,5 +3184,5 @@
         {
             pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

             Log4(("Setting IRET intercept\n"));
    @@ -3201,5 +3201,5 @@
         {
             pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_IRET;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);

             Log4(("Clearing IRET intercept\n"));
    @@ -3520,5 +3520,5 @@
             hmR0DumpRegs(pVM, pVCpu, pCtx); NOREF(pVM);
     #ifdef VBOX_STRICT
    -        Log4(("ctrl.u64VmcbCleanBits             %#RX64\n",   pVmcb->ctrl.u64VmcbCleanBits));
    +        Log4(("ctrl.u32VmcbCleanBits             %#RX32\n",   pVmcb->ctrl.u32VmcbCleanBits));
             Log4(("ctrl.u16InterceptRdCRx            %#x\n",      pVmcb->ctrl.u16InterceptRdCRx));
             Log4(("ctrl.u16InterceptWrCRx            %#x\n",      pVmcb->ctrl.u16InterceptWrCRx));
    @@ -4033,5 +4033,5 @@
         /* If we've migrating CPUs, mark the VMCB Clean bits as dirty. */
         if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
    -        pVmcbNstGst->ctrl.u64VmcbCleanBits = 0;
    +        pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;

         /* Store status of the shared guest-host state at the time of VMRUN. */
    @@ -4089,5 +4089,5 @@
         bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx);
         if (!fSupportsVmcbCleanBits)
    -        pVmcbNstGst->ctrl.u64VmcbCleanBits = 0;
    +        pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;
     }
     #endif
    @@ -4145,5 +4145,5 @@
         /* If we've migrating CPUs, mark the VMCB Clean bits as dirty. */
         if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
    -        pVmcb->ctrl.u64VmcbCleanBits = 0;
    +        pVmcb->ctrl.u32VmcbCleanBits = 0;

         /* Store status of the shared guest-host state at the time of VMRUN. */
    @@ -4198,5 +4198,5 @@
         bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx);
         if (!fSupportsVmcbCleanBits)
    -        pVmcb->ctrl.u64VmcbCleanBits = 0;
    +        pVmcb->ctrl.u32VmcbCleanBits = 0;
     }

    @@ -4310,5 +4310,5 @@

         /* Mark the VMCB-state cache as unmodified by VMM. */
    -    pVmcbNstGstCtrl->u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;
    +    pVmcbNstGstCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;

         /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
    @@ -4354,5 +4354,5 @@

         PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    -    pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;        /* Mark the VMCB-state cache as unmodified by VMM. */
    +    pVmcb->ctrl.u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;        /* Mark the VMCB-state cache as unmodified by VMM. */

         /* TSC read must be done early for maximum accuracy. */
    @@ -6512,5 +6512,5 @@
                 pVmcb->ctrl.u16InterceptRdDRx = 0;
                 pVmcb->ctrl.u16InterceptWrDRx = 0;
    -            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

                 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
    @@ -6763,5 +6763,5 @@
                     pVmcb->guest.u64DR6 = pCtx->dr[6];
                     pVmcb->guest.u64DR7 = pCtx->dr[7];
    -                pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
    +                pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
                     hmR0SvmSetPendingXcptDB(pVCpu);
                 }
    @@ -7323,5 +7323,5 @@
             Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
             pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
    -        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
    +        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
         }
         else
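
Taken together, the HMSVMR0.cpp hunks follow one lifecycle for the field: zero it whenever the CPU's cached VMCB state cannot be trusted (initialization, vCPU migration to another host CPU, or hardware without clean-bits support), clear individual bits as VMCB state is rewritten, and mark everything clean right after VMRUN returns. A condensed sketch with assumed names and an assumed all-bits value (not the VirtualBox definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define VMCB_CLEAN_ALL  UINT32_C(0x00000FFF)  /* Assumed: OR of all defined clean bits. */

    typedef struct { uint32_t u32VmcbCleanBits; } VMCBCTRL;

    /* Before VMRUN: force a full VMCB reload when the cached state is suspect. */
    static void vmcbPreRun(VMCBCTRL *pCtrl, int idCurrentCpu, int idLastCpu,
                           bool fSupportsVmcbCleanBits)
    {
        if (idCurrentCpu != idLastCpu || !fSupportsVmcbCleanBits)
            pCtrl->u32VmcbCleanBits = 0;  /* Everything must be re-read from memory. */
    }

    /* After VMRUN: the CPU has consumed the VMCB, so nothing has been
       modified by the VMM since the run started. */
    static void vmcbPostRun(VMCBCTRL *pCtrl)
    {
        pCtrl->u32VmcbCleanBits = VMCB_CLEAN_ALL;
    }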
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r70254 → r70258

    @@ -3667,5 +3667,5 @@
         pHlp->pfnPrintf(pHlp, "%su64NestedPagingCR3         = %#RX64\n",    pszPrefix, pVmcbCtrl->u64NestedPagingCR3);
         pHlp->pfnPrintf(pHlp, "%su64LBRVirt                 = %#RX64\n",    pszPrefix, pVmcbCtrl->u64LBRVirt);
    -    pHlp->pfnPrintf(pHlp, "%su64VmcbCleanBits           = %#RX64\n",    pszPrefix, pVmcbCtrl->u64VmcbCleanBits);
    +    pHlp->pfnPrintf(pHlp, "%su32VmcbCleanBits           = %#RX32\n",    pszPrefix, pVmcbCtrl->u32VmcbCleanBits);
         pHlp->pfnPrintf(pHlp, "%su64NextRIP                 = %#RX64\n",    pszPrefix, pVmcbCtrl->u64NextRIP);
         pHlp->pfnPrintf(pHlp, "%scbInstrFetched             = %u\n",        pszPrefix, pVmcbCtrl->cbInstrFetched);
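
The HM.cpp hunk keeps the debugger dump in sync with the new field width: IPRT's %RX32 format type prints a 32-bit value where %RX64 printed the old 64-bit field. A trivial, hypothetical example of the pairing:

    #include <iprt/stream.h>

    /* IPRT format types are width-specific; the specifier must match the
       declared type of the field being dumped. */
    static void dumpCleanBits(uint32_t u32VmcbCleanBits)
    {
        RTPrintf("u32VmcbCleanBits = %#RX32\n", u32VmcbCleanBits);
    }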