Changeset 87504 in vbox
Timestamp: Feb 1, 2021 3:12:21 PM (4 years ago)
Location:  trunk
Files:     4 edited
trunk/include/VBox/vmm/hm_svm.h
r82968 → r87504:

@@ -1079,6 +1079,6 @@
  * @remarks Please update hmR3InfoSvmNstGstVmcbCache() when changes are made to
  *          this structure.
- */
-#pragma pack(1)
+ * @todo r=bird: Why is this structure here? Looks 100% internal to me.
+ */
 typedef struct SVMNESTEDVMCBCACHE
 {
@@ -1116,5 +1116,4 @@
     bool            afPadding0[4];
 } SVMNESTEDVMCBCACHE;
-#pragma pack()
 /** Pointer to the SVMNESTEDVMCBCACHE structure. */
 typedef SVMNESTEDVMCBCACHE *PSVMNESTEDVMCBCACHE;
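The pack pragmas could go because every member of SVMNESTEDVMCBCACHE is naturally aligned and the tail is padded out explicitly (afPadding0[4]), so the packed and default layouts coincide anyway. A minimal user-mode sketch of why that makes #pragma pack(1) redundant — the demo struct below is hypothetical, not VBox code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Members ordered largest-first with explicit tail padding, in the
     * style of SVMNESTEDVMCBCACHE. */
    #pragma pack(1)
    typedef struct PACKEDDEMO
    {
        uint64_t u64Field;
        uint32_t u32Field;
        bool     fFlag;
        bool     afPadding0[3];
    } PACKEDDEMO;
    #pragma pack()

    /* Identical members, default alignment rules. */
    typedef struct UNPACKEDDEMO
    {
        uint64_t u64Field;
        uint32_t u32Field;
        bool     fFlag;
        bool     afPadding0[3];
    } UNPACKEDDEMO;

    int main(void)
    {
        /* Both print 16 on common ABIs: the explicit padding already makes
         * the layout invariant, so the pragma only added portability risk. */
        printf("packed=%zu unpacked=%zu\n",
               sizeof(PACKEDDEMO), sizeof(UNPACKEDDEMO));
        return 0;
    }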
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r87503 → r87504:

@@ -2107,5 +2107,5 @@
              every #VMEXIT if we should update the TPR. */
         Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking);
-        pVCpu->hm.s.svm.fSyncVTpr = false;
+        pVCpu->hmr0.s.svm.fSyncVTpr = false;
 
         if (!pVM->hm.s.fTPRPatchingActive)
@@ -2121,4 +2121,4 @@
             {
                 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
-                pVCpu->hm.s.svm.fSyncVTpr = true;
+                pVCpu->hmr0.s.svm.fSyncVTpr = true;
             }
@@ -2138,5 +2138,5 @@
         {
             hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-            pVCpu->hm.s.svm.fSyncVTpr = true;
+            pVCpu->hmr0.s.svm.fSyncVTpr = true;
         }
         pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
@@ -2571,5 +2571,5 @@
      * to the nested-guest intercepts and we always run with V_INTR_MASKING.
      */
-    pVCpu->hm.s.svm.fSyncVTpr = false;
+    pVCpu->hmr0.s.svm.fSyncVTpr = false;
 
 #ifdef DEBUG_ramshankar
@@ -2602,5 +2602,5 @@
     else
     {
-        Assert(!pVCpu->hm.s.svm.fSyncVTpr);
+        Assert(!pVCpu->hmr0.s.svm.fSyncVTpr);
         Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
         Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
@@ -4129,5 +4129,5 @@
      * world-switch so we can update it on the way back if the guest changed the TPR.
      */
-    if (pVCpu->hm.s.svm.fSyncVTpr)
+    if (pVCpu->hmr0.s.svm.fSyncVTpr)
     {
         Assert(!pSvmTransient->fIsNestedGuest);
@@ -4285,6 +4285,6 @@
     {
         uint64_t const uGuestTscAux = CPUMGetGuestTscAux(pVCpu);
-        pVCpu->hm.s.svm.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
-        if (uGuestTscAux != pVCpu->hm.s.svm.u64HostTscAux)
+        pVCpu->hmr0.s.svm.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
+        if (uGuestTscAux != pVCpu->hmr0.s.svm.u64HostTscAux)
             ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
         hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
@@ -4368,5 +4368,5 @@
         uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
         CPUMSetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
-        if (u64GuestTscAuxMsr != pVCpu->hm.s.svm.u64HostTscAux)
-            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.svm.u64HostTscAux);
+        if (u64GuestTscAuxMsr != pVCpu->hmr0.s.svm.u64HostTscAux)
+            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hmr0.s.svm.u64HostTscAux);
     }
@@ -4421,5 +4421,5 @@
 
     if (   pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
-        && pVCpu->hm.s.svm.fSyncVTpr)
+        && pVCpu->hmr0.s.svm.fSyncVTpr)
     {
         Assert(!pSvmTransient->fIsNestedGuest);
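Every hunk in this file is a mechanical rename from pVCpu->hm.s.svm to pVCpu->hmr0.s.svm: fSyncVTpr and u64HostTscAux are only ever touched by the ring-0 code, so they move into the ring-0-only per-VCPU data (see the HMInternal.h hunks below). The TSC_AUX handling around lines 4285 and 4368 is a lazy MSR-swap pattern: save the host value before the world switch, load the guest's, and restore on the way back only if the values differ. A runnable user-mode sketch of that pattern, with hypothetical names and a plain variable standing in for the MSR:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for ASMRdMsr/ASMWrMsr on MSR_K8_TSC_AUX: a variable models
     * the CPU register so the sketch runs outside ring-0. */
    static uint64_t g_uTscAuxMsr = 0x1111;                /* "host" value */
    static uint64_t msrRead(void)           { return g_uTscAuxMsr; }
    static void     msrWrite(uint64_t uVal) { g_uTscAuxMsr = uVal; }

    typedef struct DEMOCPU
    {
        uint64_t u64HostTscAux;    /* ring-0-only after this changeset */
        uint64_t u64GuestTscAux;   /* guest's view, kept by CPUM in VBox */
    } DEMOCPU;

    static void preRun(DEMOCPU *pCpu)
    {
        /* Save the host value, then load the guest's -- skipping the
         * write when they already match, since WRMSR is expensive. */
        pCpu->u64HostTscAux = msrRead();
        if (pCpu->u64GuestTscAux != pCpu->u64HostTscAux)
            msrWrite(pCpu->u64GuestTscAux);
    }

    static void postRun(DEMOCPU *pCpu)
    {
        /* The guest may have written the MSR while it ran; capture that,
         * then restore the host value if it differs. */
        pCpu->u64GuestTscAux = msrRead();
        if (pCpu->u64GuestTscAux != pCpu->u64HostTscAux)
            msrWrite(pCpu->u64HostTscAux);
    }

    int main(void)
    {
        DEMOCPU Cpu = { 0, 0x2222 };
        preRun(&Cpu);                   /* loads guest value 0x2222 */
        msrWrite(0x3333);               /* guest writes the MSR */
        postRun(&Cpu);                  /* restores host value 0x1111 */
        printf("host=%#llx guest=%#llx\n",
               (unsigned long long)Cpu.u64HostTscAux,
               (unsigned long long)Cpu.u64GuestTscAux);
        return 0;
    }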
trunk/src/VBox/VMM/include/HMInternal.h
r87503 → r87504:

@@ -1018,7 +1018,4 @@
 struct HMCPUSVM
 {
-    /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
-     *  we should check if the VTPR changed on every VM-exit. */
-    bool                        fSyncVTpr;
     /** Whether to emulate long mode support for sysenter/sysexit like intel CPUs
      *  does. This means intercepting \#UD to emulate the instructions in
@@ -1026,8 +1023,5 @@
      *  preserve the upper 32 bits written to them (AMD will ignore and discard). */
     bool                        fEmulateLongModeSysEnterExit;
-    uint8_t                     au8Alignment0[6];
-
-    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
-    uint64_t                    u64HostTscAux;
+    uint8_t                     au8Alignment0[7];
 
     /** Cache of the nested-guest's VMCB fields that we modify in order to run the
@@ -1317,4 +1311,12 @@
     R0PTRTYPE(void *)           pvMsrBitmap;
 
+    /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
+     *  we should check if the VTPR changed on every VM-exit. */
+    bool                        fSyncVTpr;
+    bool                        afAlignment[7];
+
+    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
+    uint64_t                    u64HostTscAux;
+
     /** For saving stack space, the disassembler state is allocated here
      *  instead of on the stack. */
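The net effect of these hunks is a split of the per-VCPU SVM state: HMCPUSVM, which both ring-3 and ring-0 can see, loses fSyncVTpr and u64HostTscAux (its alignment padding grows from 6 to 7 bytes to absorb the removed bool), while the ring-0-only structure (reached as pVCpu->hmr0.s.svm in the .cpp hunks above) gains them. A hedged sketch of the resulting shapes — abbreviated demo types, not the actual VBox definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    typedef struct DEMOHMCPUSVM            /* shared ring-3/ring-0 part */
    {
        bool     fEmulateLongModeSysEnterExit;
        uint8_t  au8Alignment0[7];         /* was [6]; absorbs the removed bool */
        /* ... nested-guest VMCB cache follows in the real structure ... */
    } DEMOHMCPUSVM;

    typedef struct DEMOHMR0CPUSVM          /* ring-0-only part */
    {
        bool     fSyncVTpr;
        bool     afAlignment[7];           /* keep the next member 8-byte aligned */
        uint64_t u64HostTscAux;            /* host MSR value: ring-3 never needs it */
    } DEMOHMR0CPUSVM;

    /* The explicit padding keeps u64HostTscAux naturally aligned, the same
     * invariant the real code maintains with its alignment arrays. */
    static_assert(offsetof(DEMOHMR0CPUSVM, u64HostTscAux) == 8,
                  "padding must keep u64HostTscAux 8-byte aligned");

Keeping host-only values such as TSC_AUX out of the shared block means ring-3 can neither observe nor corrupt them, which appears to be the thrust of the wider hm.s → hmr0.s refactoring this changeset is part of.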
trunk/src/VBox/VMM/include/HMInternal.mac
r87503 → r87504:

@@ -137,10 +137,7 @@
 
 struc HMCPUSVM
-    .fSyncVTpr                  resb 1
     .fEmulateLongModeSysEnterExit resb 1
 
     alignb 8
-    .u64HostTscAux              resq 1
-
     .NstGstVmcbCache            resb 40
 endstruc
@@ -222,4 +219,10 @@
     .pvMsrBitmap                RTR0PTR_RES 1
 
+    .fSyncVTpr                  resb 1
+
+    alignb 8
+    .u64HostTscAux              resq 1
+
+    alignb 8
     .DisState                   resb 0d8h
 endstruc
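HMInternal.mac mirrors the C structures for the assembly code, so the two fields have to move here in lockstep with HMInternal.h. Drift between the .mac and .h layouts is a classic source of silent state corruption; one way to catch it at build time is a compile-time offset check on the C side. A hedged sketch with illustrative names (VBox has its own AssertCompile* machinery for this kind of check):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    /* Offsets the assembly layout implies for the demo struct below:
     * fSyncVTpr at 0, seven pad bytes, u64HostTscAux at 8. */
    #define DEMO_ASM_OFF_FSYNCVTPR      0
    #define DEMO_ASM_OFF_U64HOSTTSCAUX  8

    typedef struct DEMOSVMR0
    {
        bool     fSyncVTpr;
        bool     afAlignment[7];
        uint64_t u64HostTscAux;
    } DEMOSVMR0;

    /* If someone reorders the C struct without updating the .mac file (or
     * vice versa), these checks fail the build instead of corrupting state. */
    static_assert(offsetof(DEMOSVMR0, fSyncVTpr)     == DEMO_ASM_OFF_FSYNCVTPR,
                  "asm layout out of sync: fSyncVTpr");
    static_assert(offsetof(DEMOSVMR0, u64HostTscAux) == DEMO_ASM_OFF_U64HOSTTSCAUX,
                  "asm layout out of sync: u64HostTscAux");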