VirtualBox

Changeset 87500 in vbox


Timestamp: Feb 1, 2021 2:16:43 PM
Author: vboxsync
Message:

VMM/HM: Drop the VMX/SVM data unions in the VM and GVM structures. Too risky and not worth saving a few bytes.
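
The gist of the layout change, as a minimal sketch with hypothetical stand-in types (not the real HMCPU/HMR0PERVCPU definitions): overlaying the VT-x and AMD-V blocks in a union only saves roughly the size of the smaller block, while making it possible for code of one mode to scribble over the other mode's state; with separate members each block gets its own storage.

    #include <cstdint>

    /* Before (sketch): the per-mode data shared storage in a union. */
    struct PerVCpuBefore
    {
        union
        {
            struct { uint64_t HCPhysVmcbHost;    } svm;   /* AMD-V state */
            struct { uint64_t u64GstMsrApicBase; } vmx;   /* VT-x state  */
        } u;                               /* overlaid: u.vmx and u.svm alias */
    };

    /* After (sketch): independent members, a few bytes larger, no aliasing. */
    struct PerVCpuAfter
    {
        struct { uint64_t HCPhysVmcbHost;    } svm;
        struct { uint64_t u64GstMsrApicBase; } vmx;
    };

    static_assert(sizeof(PerVCpuBefore) <= sizeof(PerVCpuAfter),
                  "the union can only save the size of the smaller block");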

Location: trunk/src/VBox/VMM
Files: 4 edited

  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

    r87491 → r87500, around line 1350:

             ; Save host fs, gs, sysenter msr etc.
    -        mov     rax, [rsi + VMCPU.hm + HMCPU.u + HMCPUSVM.HCPhysVmcbHost]
    +        mov     rax, [rsi + VMCPU.hm + HMCPU.svm + HMCPUSVM.HCPhysVmcbHost]
             mov     qword [rbp + frm_HCPhysVmcbHost], rax ; save for the vmload after vmrun
             lea     rsi, [rsi + VMCPU.cpum.GstCtx]
  • trunk/src/VBox/VMM/include/HMInternal.h

    r87493 → r87500, around line 395:

     typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

    -/** @def HM_UNION_NM
    - * For compilers (like DTrace) that does not grok nameless unions, we have a
    - * little hack to make them palatable.
    - */
    -/** @def HM_STRUCT_NM
    - * For compilers (like DTrace) that does not grok nameless structs (it is
    - * non-standard C++), we have a little hack to make them palatable.
    - */
    -/** @def HM_NAMELESS_UNION_TAG
    - * For tagging a nameless union so tstASMStructs.cpp can find check the nested
    - * structures within the union.
    - */
    -#ifdef VBOX_FOR_DTRACE_LIB
    -# define HM_UNION_NM(a_Nm)              a_Nm
    -# define HM_STRUCT_NM(a_Nm)             a_Nm
    -# define HM_NAMELESS_UNION_TAG(a_Tag)
    -#elif defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS)
    -# define HM_UNION_NM(a_Nm)              a_Nm
    -# define HM_STRUCT_NM(a_Nm)             a_Nm
    -# define HM_NAMELESS_UNION_TAG(a_Tag)   a_Tag
    -#else
    -# define HM_UNION_NM(a_Nm)
    -# define HM_STRUCT_NM(a_Nm)
    -# define HM_NAMELESS_UNION_TAG(a_Tag)
    -#endif

     /**
     
    around lines 1000 (r87493) / 975 (r87500):

         uint64_t                    fCtxChanged;

    -    union HM_NAMELESS_UNION_TAG(HMCPUUNION) /* no tag! */
    +    /** VT-x data.   */
    +    struct HMCPUVMX
         {
    -        /** VT-x data.   */
    -        struct HM_NAMELESS_UNION_TAG(HMCPUVMX)
    +        /** @name Guest information.
    +         * @{ */
    +        /** Guest VMCS information shared with ring-3. */
    +        VMXVMCSINFOSHARED           VmcsInfo;
    +        /** Nested-guest VMCS information shared with ring-3. */
    +        VMXVMCSINFOSHARED           VmcsInfoNstGst;
    +        /** Whether the nested-guest VMCS was the last current VMCS (shadow copy for ring-3).
    +         * @see HMR0PERVCPU::vmx.fSwitchedToNstGstVmcs  */
    +        bool                        fSwitchedToNstGstVmcsCopyForRing3;
    +        /** Whether the static guest VMCS controls has been merged with the
    +         *  nested-guest VMCS controls. */
    +        bool                        fMergedNstGstCtls;
    +        /** Whether the nested-guest VMCS has been copied to the shadow VMCS. */
    +        bool                        fCopiedNstGstToShadowVmcs;
    +        /** Whether flushing the TLB is required due to switching to/from the
    +         *  nested-guest. */
    +        bool                        fSwitchedNstGstFlushTlb;
    +        /** Alignment. */
    +        bool                        afAlignment0[4];
    +        /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
    +        uint64_t                    u64GstMsrApicBase;
    +        /** @} */
    +
    +        /** @name Error reporting and diagnostics.
    +         * @{ */
    +        /** VT-x error-reporting (mainly for ring-3 propagation). */
    +        struct
             {
    -            /** @name Guest information.
    -             * @{ */
    -            /** Guest VMCS information shared with ring-3. */
    -            VMXVMCSINFOSHARED           VmcsInfo;
    -            /** Nested-guest VMCS information shared with ring-3. */
    -            VMXVMCSINFOSHARED           VmcsInfoNstGst;
    -            /** Whether the nested-guest VMCS was the last current VMCS (shadow copy for ring-3).
    -             * @see HMR0PERVCPU::vmx.fSwitchedToNstGstVmcs  */
    -            bool                        fSwitchedToNstGstVmcsCopyForRing3;
    -            /** Whether the static guest VMCS controls has been merged with the
    -             *  nested-guest VMCS controls. */
    -            bool                        fMergedNstGstCtls;
    -            /** Whether the nested-guest VMCS has been copied to the shadow VMCS. */
    -            bool                        fCopiedNstGstToShadowVmcs;
    -            /** Whether flushing the TLB is required due to switching to/from the
    -             *  nested-guest. */
    -            bool                        fSwitchedNstGstFlushTlb;
    -            /** Alignment. */
    -            bool                        afAlignment0[4];
    -            /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
    -            uint64_t                    u64GstMsrApicBase;
    -            /** @} */
    -
    -            /** @name Error reporting and diagnostics.
    -             * @{ */
    -            /** VT-x error-reporting (mainly for ring-3 propagation). */
    -            struct
    -            {
    -                RTCPUID                 idCurrentCpu;
    -                RTCPUID                 idEnteredCpu;
    -                RTHCPHYS                HCPhysCurrentVmcs;
    -                uint32_t                u32VmcsRev;
    -                uint32_t                u32InstrError;
    -                uint32_t                u32ExitReason;
    -                uint32_t                u32GuestIntrState;
    -            } LastError;
    -            /** @} */
    -        } vmx;
    -
    -        /** SVM data. */
    -        struct HM_NAMELESS_UNION_TAG(HMCPUSVM)
    -        {
    -            /** Physical address of the host VMCB which holds additional host-state. */
    -            RTHCPHYS                    HCPhysVmcbHost;
    -            /** R0 memory object for the host VMCB which holds additional host-state. */
    -            RTR0MEMOBJ                  hMemObjVmcbHost;
    -            /** Padding.
    -             * @todo remove, pointless now  */
    -            R0PTRTYPE(void *)           pvPadding;
    -
    -            /** Physical address of the guest VMCB. */
    -            RTHCPHYS                    HCPhysVmcb;
    -            /** R0 memory object for the guest VMCB. */
    -            RTR0MEMOBJ                  hMemObjVmcb;
    -            /** Pointer to the guest VMCB. */
    -            R0PTRTYPE(PSVMVMCB)         pVmcb;
    -
    -            /** Physical address of the MSR bitmap (8 KB). */
    -            RTHCPHYS                    HCPhysMsrBitmap;
    -            /** R0 memory object for the MSR bitmap (8 KB). */
    -            RTR0MEMOBJ                  hMemObjMsrBitmap;
    -            /** Pointer to the MSR bitmap. */
    -            R0PTRTYPE(void *)           pvMsrBitmap;
    -
    -            /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
    -             *  we should check if the VTPR changed on every VM-exit. */
    -            bool                        fSyncVTpr;
    -            /** Whether to emulate long mode support for sysenter/sysexit like intel CPUs
    -             *  does.   This means intercepting \#UD to emulate the instructions in
    -             *  long-mode and to intercept reads and writes to the SYSENTER MSRs in order to
    -             *  preserve the upper 32 bits written to them (AMD will ignore and discard). */
    -            bool                        fEmulateLongModeSysEnterExit;
    -            uint8_t                     au8Alignment0[6];
    -
    -            /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    -            uint64_t                    u64HostTscAux;
    -
    -            /** Cache of the nested-guest's VMCB fields that we modify in order to run the
    -             *  nested-guest using AMD-V. This will be restored on \#VMEXIT. */
    -            SVMNESTEDVMCBCACHE          NstGstVmcbCache;
    -        } svm;
    -    } HM_UNION_NM(u);
    +            RTCPUID                 idCurrentCpu;
    +            RTCPUID                 idEnteredCpu;
    +            RTHCPHYS                HCPhysCurrentVmcs;
    +            uint32_t                u32VmcsRev;
    +            uint32_t                u32InstrError;
    +            uint32_t                u32ExitReason;
    +            uint32_t                u32GuestIntrState;
    +        } LastError;
    +        /** @} */
    +    } vmx;
    +
    +    /** SVM data. */
    +    struct HMCPUSVM
    +    {
    +        /** Physical address of the host VMCB which holds additional host-state. */
    +        RTHCPHYS                    HCPhysVmcbHost;
    +        /** R0 memory object for the host VMCB which holds additional host-state. */
    +        RTR0MEMOBJ                  hMemObjVmcbHost;
    +        /** Padding.
    +         * @todo remove, pointless now  */
    +        R0PTRTYPE(void *)           pvPadding;
    +
    +        /** Physical address of the guest VMCB. */
    +        RTHCPHYS                    HCPhysVmcb;
    +        /** R0 memory object for the guest VMCB. */
    +        RTR0MEMOBJ                  hMemObjVmcb;
    +        /** Pointer to the guest VMCB. */
    +        R0PTRTYPE(PSVMVMCB)         pVmcb;
    +
    +        /** Physical address of the MSR bitmap (8 KB). */
    +        RTHCPHYS                    HCPhysMsrBitmap;
    +        /** R0 memory object for the MSR bitmap (8 KB). */
    +        RTR0MEMOBJ                  hMemObjMsrBitmap;
    +        /** Pointer to the MSR bitmap. */
    +        R0PTRTYPE(void *)           pvMsrBitmap;
    +
    +        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
    +         *  we should check if the VTPR changed on every VM-exit. */
    +        bool                        fSyncVTpr;
    +        /** Whether to emulate long mode support for sysenter/sysexit like intel CPUs
    +         *  does.   This means intercepting \#UD to emulate the instructions in
    +         *  long-mode and to intercept reads and writes to the SYSENTER MSRs in order to
    +         *  preserve the upper 32 bits written to them (AMD will ignore and discard). */
    +        bool                        fEmulateLongModeSysEnterExit;
    +        uint8_t                     au8Alignment0[6];
    +
    +        /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    +        uint64_t                    u64HostTscAux;
    +
    +        /** Cache of the nested-guest's VMCB fields that we modify in order to run the
    +         *  nested-guest using AMD-V. This will be restored on \#VMEXIT. */
    +        SVMNESTEDVMCBCACHE          NstGstVmcbCache;
    +    } svm;

         /** Event injection state. */
     
    around lines 1257 (r87493) / 1229 (r87500):

     AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush,  4);
     AssertCompileMemberAlignment(HMCPU, fCtxChanged,       8);
    -AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx, 8);
    -AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfo,       8);
    -AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfoNstGst, 8);
    -AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) svm, 8);
    +AssertCompileMemberAlignment(HMCPU, vmx, 8);
    +AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfo,       8);
    +AssertCompileMemberAlignment(HMCPU, vmx.VmcsInfoNstGst, 8);
    +AssertCompileMemberAlignment(HMCPU, svm, 8);
     AssertCompileMemberAlignment(HMCPU, Event, 8);
     
    around lines 1300 (r87493) / 1272 (r87500):

         bool                        afPadding1[5];

    -    union HM_NAMELESS_UNION_TAG(HMR0CPUUNION) /* no tag! */
    +    /** VT-x data.   */
    +    struct HMR0CPUVMX
         {
    -        /** VT-x data.   */
    -        struct HM_NAMELESS_UNION_TAG(HMR0CPUVMX)
    -        {
    -            /** Ring-0 pointer to the hardware-assisted VMX execution function. */
    -            PFNHMVMXSTARTVM             pfnStartVm;
    -
    -            /** @name Guest information.
    -             * @{ */
    -            /** Guest VMCS information. */
    -            VMXVMCSINFO                 VmcsInfo;
    -            /** Nested-guest VMCS information. */
    -            VMXVMCSINFO                 VmcsInfoNstGst;
    -            /* Whether the nested-guest VMCS was the last current VMCS (authoritative copy).
    -             * @see HMCPU::vmx.fSwitchedToNstGstVmcsCopyForRing3  */
    -            bool                        fSwitchedToNstGstVmcs;
    -            bool                        afAlignment0[7];
    -            /** @} */
    -
    -            /** @name Host information.
    -             * @{ */
    -            /** Host LSTAR MSR to restore lazily while leaving VT-x. */
    -            uint64_t                    u64HostMsrLStar;
    -            /** Host STAR MSR to restore lazily while leaving VT-x. */
    -            uint64_t                    u64HostMsrStar;
    -            /** Host SF_MASK MSR to restore lazily while leaving VT-x. */
    -            uint64_t                    u64HostMsrSfMask;
    -            /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */
    -            uint64_t                    u64HostMsrKernelGsBase;
    -            /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */
    -            uint32_t                    fLazyMsrs;
    -            /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */
    -            bool                        fUpdatedHostAutoMsrs;
    -            /** Alignment. */
    -            uint8_t                     au8Alignment0[3];
    -            /** Which host-state bits to restore before being preempted, see
    -             * VMX_RESTORE_HOST_XXX. */
    -            uint32_t                    fRestoreHostFlags;
    -            /** Alignment. */
    -            uint32_t                    u32Alignment0;
    -            /** The host-state restoration structure. */
    -            VMXRESTOREHOST              RestoreHost;
    -            /** @} */
    -        } vmx;
    -
    -        /** SVM data. */
    -        struct HM_NAMELESS_UNION_TAG(HMR0CPUSVM)
    -        {
    -            /** Ring 0 handlers for VT-x. */
    -            PFNHMSVMVMRUN               pfnVMRun;
    -
    -            /** For saving stack space, the disassembler state is allocated here
    -             * instead of on the stack. */
    -            DISCPUSTATE                 DisState;
    -        } svm;
    -    } HM_UNION_NM(u);
    +        /** Ring-0 pointer to the hardware-assisted VMX execution function. */
    +        PFNHMVMXSTARTVM             pfnStartVm;
    +
    +        /** @name Guest information.
    +         * @{ */
    +        /** Guest VMCS information. */
    +        VMXVMCSINFO                 VmcsInfo;
    +        /** Nested-guest VMCS information. */
    +        VMXVMCSINFO                 VmcsInfoNstGst;
    +        /* Whether the nested-guest VMCS was the last current VMCS (authoritative copy).
    +         * @see HMCPU::vmx.fSwitchedToNstGstVmcsCopyForRing3  */
    +        bool                        fSwitchedToNstGstVmcs;
    +        bool                        afAlignment0[7];
    +        /** @} */
    +
    +        /** @name Host information.
    +         * @{ */
    +        /** Host LSTAR MSR to restore lazily while leaving VT-x. */
    +        uint64_t                    u64HostMsrLStar;
    +        /** Host STAR MSR to restore lazily while leaving VT-x. */
    +        uint64_t                    u64HostMsrStar;
    +        /** Host SF_MASK MSR to restore lazily while leaving VT-x. */
    +        uint64_t                    u64HostMsrSfMask;
    +        /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */
    +        uint64_t                    u64HostMsrKernelGsBase;
    +        /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */
    +        uint32_t                    fLazyMsrs;
    +        /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */
    +        bool                        fUpdatedHostAutoMsrs;
    +        /** Alignment. */
    +        uint8_t                     au8Alignment0[3];
    +        /** Which host-state bits to restore before being preempted, see
    +         * VMX_RESTORE_HOST_XXX. */
    +        uint32_t                    fRestoreHostFlags;
    +        /** Alignment. */
    +        uint32_t                    u32Alignment0;
    +        /** The host-state restoration structure. */
    +        VMXRESTOREHOST              RestoreHost;
    +        /** @} */
    +    } vmx;
    +
    +    /** SVM data. */
    +    struct HMR0CPUSVM
    +    {
    +        /** Ring 0 handlers for VT-x. */
    +        PFNHMSVMVMRUN               pfnVMRun;
    +
    +        /** For saving stack space, the disassembler state is allocated here
    +         * instead of on the stack. */
    +        DISCPUSTATE                 DisState;
    +    } svm;
     } HMR0PERVCPU;
     /** Pointer to HM ring-0 VMCPU instance data. */

    around lines 1362 (r87493) / 1331 (r87500):

     AssertCompileMemberAlignment(HMR0PERVCPU, cWorldSwitchExits, 4);
     AssertCompileMemberAlignment(HMR0PERVCPU, fForceTLBFlush,    4);
    -AssertCompileMemberAlignment(HMR0PERVCPU, HM_UNION_NM(u.) vmx.RestoreHost,    8);
    +AssertCompileMemberAlignment(HMR0PERVCPU, vmx.RestoreHost,   8);
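
For context on the HM_UNION_NM / HM_NAMELESS_UNION_TAG machinery deleted above: it let one declaration be a nameless union in normal C++ builds while giving the union a name and a tag for tools (DTrace, the assembly structure testcase) that cannot cope with nameless unions. A rough, self-contained sketch of the pattern, using stand-in names rather than the exact VirtualBox macros:

    /* Stand-ins for HM_UNION_NM / HM_NAMELESS_UNION_TAG (illustrative only). */
    #ifdef TOOL_WITHOUT_NAMELESS_UNIONS
    # define STATE_UNION_NM(a_Nm)    a_Nm   /* named: fields reached via ".u"     */
    # define STATE_UNION_TAG(a_Tag)  a_Tag  /* tagged so tests can name the type  */
    #else
    # define STATE_UNION_NM(a_Nm)           /* nameless: fields reached directly  */
    # define STATE_UNION_TAG(a_Tag)
    #endif

    struct SomeCpuState
    {
        union STATE_UNION_TAG(SOMECPUSTATEUNION)
        {
            struct { int iVmx; } vmx;
            struct { int iSvm; } svm;
        } STATE_UNION_NM(u);
    };

With the unions gone, that distinction disappears: every consumer now uses the same .vmx / .svm member paths, which is why HMR0A.asm above and tstAsmStructs.cpp below drop their ".u" and "HMCPUUNION" spellings.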
  • trunk/src/VBox/VMM/include/HMInternal.mac

    r87493 → r87500, around line 176:

         alignb 8
    -;%if HMCPUVMX_size > HMCPUSVM_size
    -    .u                              resb    HMCPUVMX_size
    -;%else
    -;    .u                              resb    HMCPUSVM_size
    -;%endif
    +    .vmx                            resb    HMCPUVMX_size
    +    alignb 8
    +    .svm                            resb    HMCPUSVM_size

         .Event.fPending                 resd    1

    around lines 241 (r87493) / 239 (r87500):

         alignb 8
    -;%if HMR0CPUVMX_size > HMR0CPUSVM_size
    -    .u                              resb    HMR0CPUVMX_size
    -;%else
    -;    .u                              resb    HMR0CPUSVM_size
    -;%endif
    -endstruc
    +    .vmx                            resb    HMR0CPUVMX_size
    +    alignb 8
    +    .svm                            resb    HMR0CPUSVM_size
    +endstruc
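
The .mac file is a hand-maintained assembly mirror of the C++ structure, so it only reserves raw bytes: where the compiler pads members to their natural alignment automatically, the mirror has to spell the padding out, hence the extra "alignb 8" between the new .vmx and .svm blobs. A small C++ sketch of the property being preserved (hypothetical members, not the real layout):

    #include <cstddef>
    #include <cstdint>

    struct MirroredState
    {
        bool     fPending;                  /* 1 byte; compiler pads to 8 here  */
        struct { uint64_t a, b; } vmx;      /* mirror reserves HMCPUVMX_size    */
        struct { uint64_t c;    } svm;      /* mirror reserves HMCPUSVM_size    */
    };

    /* What the alignb directives guarantee on the assembly side: */
    static_assert(offsetof(MirroredState, vmx) % 8 == 0, "vmx must be 8-byte aligned");
    static_assert(offsetof(MirroredState, svm) % 8 == 0, "svm must be 8-byte aligned");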
  • trunk/src/VBox/VMM/testcase/tstAsmStructs.cpp

    r87466 → r87500, around line 35:

     /* Hack for validating nested HMCPU structures. */
    -typedef HMCPU::HMCPUUNION::HMCPUVMX HMCPUVMX;
    -typedef HMCPU::HMCPUUNION::HMCPUSVM HMCPUSVM;
    -typedef HMR0PERVCPU::HMR0CPUUNION::HMR0CPUVMX HMR0CPUVMX;
    -typedef HMR0PERVCPU::HMR0CPUUNION::HMR0CPUSVM HMR0CPUSVM;
    +typedef HMCPU::HMCPUVMX HMCPUVMX;
    +typedef HMCPU::HMCPUSVM HMCPUSVM;
    +typedef HMR0PERVCPU::HMR0CPUVMX HMR0CPUVMX;
    +typedef HMR0PERVCPU::HMR0CPUSVM HMR0CPUSVM;

     /* For sup.mac simplifications. */
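
The typedef hack exists because the testcase compares the C++ structures against the sizes and offsets generated for assembly: giving the nested types short, top-level names makes them usable in those checks, and with the union tags gone the aliases become one level shorter. A stand-in sketch of the idea (hypothetical types and constants, not the actual testcase code):

    #include <cstddef>
    #include <cstdint>

    struct OuterState
    {
        struct InnerVmx { uint64_t u64A; } vmx;
        struct InnerSvm { uint64_t u64B; } svm;
    };

    /* Short aliases for the nested types, as the testcase does for HMCPUVMX etc. */
    typedef OuterState::InnerVmx InnerVmx;
    typedef OuterState::InnerSvm InnerSvm;

    /* Pretend these came from the assembly side (e.g. exported by the .mac file). */
    constexpr std::size_t InnerVmx_size = 8;
    constexpr std::size_t InnerSvm_size = 8;

    static_assert(sizeof(InnerVmx) == InnerVmx_size, "assembly mirror out of sync");
    static_assert(sizeof(InnerSvm) == InnerSvm_size, "assembly mirror out of sync");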