VirtualBox

Changeset 87504 in vbox


Timestamp: Feb 1, 2021 3:12:21 PM (4 years ago)
Author:    vboxsync
Message:   VMM/HMSVM: Moving more stuff to HMR0PERVCPU. bugref:9217
Location:  trunk
Files:     4 edited

Legend:

    '+'  Added
    '-'  Removed
    (unprefixed lines are unchanged context)
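
In practical terms the change continues the bugref:9217 split of per-VCPU HM state into a part shared between ring-3 and ring-0 (reached via pVCpu->hm.s) and a ring-0-only part (reached via pVCpu->hmr0.s): the SVM fields fSyncVTpr and u64HostTscAux are only touched by the ring-0 run loop, so they move, and every access in HMSVMR0.cpp changes accordingly. A minimal sketch of the before/after layout, with made-up struct names and only the members visible in the hunks below (the real definitions are in HMInternal.h):

    /* Illustrative sketch only; names other than the field names are invented. */
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct SVMSHAREDSTATE       /* shared ring-3/ring-0: pVCpu->hm.s.svm */
    {
        bool     fEmulateLongModeSysEnterExit;
        uint8_t  au8Alignment0[7];
        /* fSyncVTpr and u64HostTscAux no longer live here after r87504. */
    } SVMSHAREDSTATE;

    typedef struct SVMRING0STATE        /* ring-0 only: pVCpu->hmr0.s.svm */
    {
        bool     fSyncVTpr;             /* moved from the shared state    */
        bool     afAlignment[7];
        uint64_t u64HostTscAux;         /* moved from the shared state    */
    } SVMRING0STATE;

    /* Accesses therefore change from pVCpu->hm.s.svm.fSyncVTpr
     * to pVCpu->hmr0.s.svm.fSyncVTpr, and likewise for u64HostTscAux. */
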
  • trunk/include/VBox/vmm/hm_svm.h

    r82968 → r87504

    @@ -1079 +1079 @@
      * @remarks Please update hmR3InfoSvmNstGstVmcbCache() when changes are made to
      *          this structure.
    - */
    -#pragma pack(1)
    + * @todo r=bird: Why is this structure here? Looks 100% internal to me.
    + */
     typedef struct SVMNESTEDVMCBCACHE
     {

    @@ -1116 +1116 @@
         bool                afPadding0[4];
     } SVMNESTEDVMCBCACHE;
    -#pragma pack()
     /** Pointer to the SVMNESTEDVMCBCACHE structure. */
     typedef SVMNESTEDVMCBCACHE *PSVMNESTEDVMCBCACHE;
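
Besides the @todo from bird questioning why SVMNESTEDVMCBCACHE sits in a public header at all, the hunk drops the #pragma pack(1) / #pragma pack() pair around it. Removing 1-byte packing is safe when the members are already naturally aligned and the tail is padded explicitly (the afPadding0[4] member above); a compile-time size assertion is the usual way to make that assumption checkable. A small sketch under that assumption, with an invented member list since the full structure is not shown in the hunk:

    /* Illustrative only: a naturally aligned, explicitly padded structure keeps
     * the same layout whether or not it is wrapped in #pragma pack(1). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <assert.h>

    typedef struct EXAMPLECACHE
    {
        uint64_t u64SomeField;      /* 8-byte aligned on its own                   */
        uint16_t u16FieldA;
        uint16_t u16FieldB;
        bool     afPadding0[4];     /* explicit tail padding up to a multiple of 8 */
    } EXAMPLECACHE;

    /* If this holds without the pragma, the pragma was only restating the layout. */
    static_assert(sizeof(EXAMPLECACHE) == 16, "compiler inserted unexpected padding");
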
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r87503 → r87504

    @@ -2107 +2107 @@
                    every #VMEXIT if we should update the TPR. */
                 Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking);
    -            pVCpu->hm.s.svm.fSyncVTpr = false;
    +            pVCpu->hmr0.s.svm.fSyncVTpr = false;

                 if (!pVM->hm.s.fTPRPatchingActive)

    @@ -2121 +2121 @@
                     {
                         pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
    -                    pVCpu->hm.s.svm.fSyncVTpr = true;
    +                    pVCpu->hmr0.s.svm.fSyncVTpr = true;
                     }

    @@ -2138 +2138 @@
                     {
                         hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    -                    pVCpu->hm.s.svm.fSyncVTpr = true;
    +                    pVCpu->hmr0.s.svm.fSyncVTpr = true;
                     }
                     pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;

    @@ -2571 +2571 @@
              * to the nested-guest intercepts and we always run with V_INTR_MASKING.
              */
    -        pVCpu->hm.s.svm.fSyncVTpr = false;
    +        pVCpu->hmr0.s.svm.fSyncVTpr = false;

     #ifdef DEBUG_ramshankar

    @@ -2602 +2602 @@
         else
         {
    -        Assert(!pVCpu->hm.s.svm.fSyncVTpr);
    +        Assert(!pVCpu->hmr0.s.svm.fSyncVTpr);
             Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
             Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);

    @@ -4129 +4129 @@
          * world-switch so we can update it on the way back if the guest changed the TPR.
          */
    -    if (pVCpu->hm.s.svm.fSyncVTpr)
    +    if (pVCpu->hmr0.s.svm.fSyncVTpr)
         {
             Assert(!pSvmTransient->fIsNestedGuest);

    @@ -4285 +4285 @@
         {
             uint64_t const uGuestTscAux = CPUMGetGuestTscAux(pVCpu);
    -        pVCpu->hm.s.svm.u64HostTscAux  = ASMRdMsr(MSR_K8_TSC_AUX);
    -        if (uGuestTscAux != pVCpu->hm.s.svm.u64HostTscAux)
    +        pVCpu->hmr0.s.svm.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
    +        if (uGuestTscAux != pVCpu->hmr0.s.svm.u64HostTscAux)
                 ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
             hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);

    @@ -4368 +4368 @@
             uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
             CPUMSetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
    -        if (u64GuestTscAuxMsr != pVCpu->hm.s.svm.u64HostTscAux)
    -            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.svm.u64HostTscAux);
    +        if (u64GuestTscAuxMsr != pVCpu->hmr0.s.svm.u64HostTscAux)
    +            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hmr0.s.svm.u64HostTscAux);
         }

    @@ -4421 +4421 @@

         if (   pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
    -        && pVCpu->hm.s.svm.fSyncVTpr)
    +        && pVCpu->hmr0.s.svm.fSyncVTpr)
         {
             Assert(!pSvmTransient->fIsNestedGuest);
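
The two TSC_AUX hunks (around 4285 and 4368) are the usual bracket for letting the guest use RDTSCP without an intercept: before entering the guest, save the host's TSC_AUX MSR into the ring-0 per-VCPU area and load the guest value; after the VM-exit, capture what the guest left in the MSR and restore the host value. A simplified, self-contained sketch of that pattern; the rdmsr()/wrmsr() helpers below are stand-ins for VBox's ASMRdMsr/ASMWrMsr and just model the MSR with a variable:

    #include <stdint.h>

    #define MSR_TSC_AUX 0xc0000103u          /* MSR_K8_TSC_AUX / IA32_TSC_AUX */

    /* Hypothetical MSR accessors; a real ring-0 build would use the CPU's
     * RDMSR/WRMSR instructions (ASMRdMsr/ASMWrMsr in VBox). */
    static uint64_t g_uTscAuxMsr;
    static uint64_t rdmsr(uint32_t idMsr)              { (void)idMsr; return g_uTscAuxMsr; }
    static void     wrmsr(uint32_t idMsr, uint64_t u)  { (void)idMsr; g_uTscAuxMsr = u; }

    typedef struct VCPUTSCAUX
    {
        uint64_t u64HostTscAux;     /* saved host value, ring-0 only state */
        uint64_t u64GuestTscAux;    /* the guest's view of the MSR         */
    } VCPUTSCAUX;

    /* Before the world switch: remember the host value, install the guest one. */
    static void tscAuxLoadGuest(VCPUTSCAUX *pVCpu)
    {
        pVCpu->u64HostTscAux = rdmsr(MSR_TSC_AUX);
        if (pVCpu->u64GuestTscAux != pVCpu->u64HostTscAux)
            wrmsr(MSR_TSC_AUX, pVCpu->u64GuestTscAux);
    }

    /* After the VM-exit: keep whatever the guest wrote, then restore the host value. */
    static void tscAuxRestoreHost(VCPUTSCAUX *pVCpu)
    {
        pVCpu->u64GuestTscAux = rdmsr(MSR_TSC_AUX);
        if (pVCpu->u64GuestTscAux != pVCpu->u64HostTscAux)
            wrmsr(MSR_TSC_AUX, pVCpu->u64HostTscAux);
    }
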
  • trunk/src/VBox/VMM/include/HMInternal.h

    r87503 → r87504

    @@ -1018 +1018 @@
         struct HMCPUSVM
         {
    -        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
    -         *  we should check if the VTPR changed on every VM-exit. */
    -        bool                        fSyncVTpr;
             /** Whether to emulate long mode support for sysenter/sysexit like intel CPUs
              *  does.   This means intercepting \#UD to emulate the instructions in

    @@ -1026 +1023 @@
              *  preserve the upper 32 bits written to them (AMD will ignore and discard). */
             bool                        fEmulateLongModeSysEnterExit;
    -        uint8_t                     au8Alignment0[6];
    -
    -        /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    -        uint64_t                    u64HostTscAux;
    +        uint8_t                     au8Alignment0[7];

             /** Cache of the nested-guest's VMCB fields that we modify in order to run the

    @@ -1317 +1311 @@
             R0PTRTYPE(void *)           pvMsrBitmap;

    +        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
    +         *  we should check if the VTPR changed on every VM-exit. */
    +        bool                        fSyncVTpr;
    +        bool                        afAlignment[7];
    +
    +        /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    +        uint64_t                    u64HostTscAux;
    +
             /** For saving stack space, the disassembler state is allocated here
              * instead of on the stack. */
  • trunk/src/VBox/VMM/include/HMInternal.mac

    r87503 → r87504

    @@ -137 +137 @@

     struc HMCPUSVM
    -    .fSyncVTpr                      resb    1
         .fEmulateLongModeSysEnterExit   resb    1

         alignb 8
    -    .u64HostTscAux                  resq    1
    -
         .NstGstVmcbCache                resb    40
     endstruc

    @@ -222 +219 @@
         .pvMsrBitmap                    RTR0PTR_RES  1

    +    .fSyncVTpr                      resb    1
    +
    +    alignb 8
    +    .u64HostTscAux                  resq    1
    +
    +    alignb 8
         .DisState                       resb    0d8h
     endstruc
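
Note how the padding moves in lockstep with the fields on both the C and the assembly side: HMCPUSVM grows its pad from au8Alignment0[6] to au8Alignment0[7] because the one-byte fSyncVTpr left, while in the ring-0 structure the new fSyncVTpr is followed by afAlignment[7] in C and by alignb 8 in HMInternal.mac so that u64HostTscAux stays on an 8-byte boundary in both views of the layout. A minimal sketch of the kind of compile-time check that pins such a shared layout down, using plain C11 static_assert and an invented struct name (VBox has its own AssertCompile-style macros for this):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    /* Illustrative fragment of the ring-0 per-VCPU SVM state after r87504;
     * only the members from the hunks above, with an invented struct name. */
    typedef struct R0SVMFRAGMENT
    {
        bool     fSyncVTpr;         /* .fSyncVTpr      resb 1  in HMInternal.mac */
        bool     afAlignment[7];    /* alignb 8: pad to the next 8-byte boundary */
        uint64_t u64HostTscAux;     /* .u64HostTscAux  resq 1                    */
    } R0SVMFRAGMENT;

    /* If the C layout and the .mac struc ever drift apart, assembly code reads
     * the wrong bytes, so the offsets are worth pinning at compile time. */
    static_assert(offsetof(R0SVMFRAGMENT, u64HostTscAux) == 8, "layout drifted from HMInternal.mac");
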
