- Timestamp: Apr 22, 2019 7:36:26 AM (6 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r78238 r78240 616 616 * @param pVCpu The cross context virtual CPU structure. 617 617 */ 618 DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr0Mask(P VMCPU pVCpu)618 DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr0Mask(PCVMCPU pVCpu) 619 619 { 620 620 /* … … 645 645 * @param pVCpu The cross context virtual CPU structure. 646 646 */ 647 DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr4Mask(P VMCPU pVCpu)647 DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr4Mask(PCVMCPU pVCpu) 648 648 { 649 649 /* … … 1147 1147 1148 1148 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */ 1149 RTCCUINTREG fEFlags = ASMIntDisableFlags();1149 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 1150 1150 1151 1151 /* Enable the VMX bit in CR4 if necessary. */ 1152 RTCCUINTREG uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);1152 RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX); 1153 1153 1154 1154 /* Enter VMX root mode. */ … … 1157 1157 { 1158 1158 if (!(uOldCr4 & X86_CR4_VMXE)) 1159 SUPR0ChangeCR4(0 , ~X86_CR4_VMXE);1159 SUPR0ChangeCR4(0 /* fOrMask */, ~X86_CR4_VMXE); 1160 1160 1161 1161 if (pVM) … … 1179 1179 1180 1180 /* Paranoid: Disable interrupts as, in theory, interrupts handlers might mess with CR4. */ 1181 RTCCUINTREG fEFlags = ASMIntDisableFlags();1181 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 1182 1182 1183 1183 /* If we're for some reason not in VMX root mode, then don't leave it. */ 1184 RTCCUINTREG uHostCR4 = ASMGetCR4();1184 RTCCUINTREG const uHostCR4 = ASMGetCR4(); 1185 1185 1186 1186 int rc; … … 1189 1189 /* Exit VMX root mode and clear the VMX bit in CR4. */ 1190 1190 VMXDisable(); 1191 SUPR0ChangeCR4(0 , ~X86_CR4_VMXE);1191 SUPR0ChangeCR4(0 /* fOrMask */, ~X86_CR4_VMXE); 1192 1192 rc = VINF_SUCCESS; 1193 1193 } … … 1865 1865 * @remarks No-long-jump zone!!! 
1866 1866 */ 1867 static void hmR0VmxUpdateAutoLoadHostMsrs(P VMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)1867 static void hmR0VmxUpdateAutoLoadHostMsrs(PCVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 1868 1868 { 1869 1869 PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad; … … 1927 1927 * @param idMsr The MSR to check. 1928 1928 */ 1929 static bool hmR0VmxIsLazyGuestMsr(P VMCPU pVCpu, uint32_t idMsr)1929 static bool hmR0VmxIsLazyGuestMsr(PCVMCPU pVCpu, uint32_t idMsr) 1930 1930 { 1931 1931 NOREF(pVCpu); … … 2118 2118 * @param pVmcsInfo The VMCS info. object. 2119 2119 */ 2120 static void hmR0VmxCheckHostEferMsr(P VMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)2120 static void hmR0VmxCheckHostEferMsr(PCVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 2121 2121 { 2122 2122 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 4164 4164 * @remarks No-long-jump zone!!! 4165 4165 */ 4166 static bool hmR0VmxShouldSwapEferMsr(P VMCPU pVCpu)4166 static bool hmR0VmxShouldSwapEferMsr(PCVMCPU pVCpu) 4167 4167 { 4168 4168 #ifdef HMVMX_ALWAYS_SWAP_EFER … … 4170 4170 return true; 4171 4171 #else 4172 PC PUMCTX pCtx = &pVCpu->cpum.GstCtx;4172 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4173 4173 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 4174 4174 /* For 32-bit hosts running 64-bit guests, we always swap EFER MSR in the world-switcher. Nothing to do here. */ … … 6501 6501 uint64_t uTscOffset; 6502 6502 PVM pVM = pVCpu->CTX_SUFF(pVM); 6503 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); ;6503 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 6504 6504 6505 6505 if (pVM->hm.s.vmx.fUsePreemptTimer) … … 9861 9861 * @param pVmcsInfoGst The guest VMCS info. object. 
9862 9862 */ 9863 static void hmR0VmxMergeMsrBitmapNested(P VMCPU pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst)9863 static void hmR0VmxMergeMsrBitmapNested(PCVMCPU pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst) 9864 9864 { 9865 9865 uint64_t const *pu64MsrBitmapNstGst = (uint64_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap); … … 12801 12801 AssertRCReturn(rc, rc); 12802 12802 12803 uint32_t uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);12803 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo); 12804 12804 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT) 12805 12805 && uIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT); … … 12834 12834 } 12835 12835 12836 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;12837 uint32_t uVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);12836 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo; 12837 uint32_t const uVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo); 12838 12838 switch (uIntType) 12839 12839 { … … 14770 14770 14771 14771 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */ 14772 uint64_t uDR6 = X86_DR6_INIT_VAL; 14773 uDR6 |= (pVmxTransient->uExitQual & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS)); 14772 uint64_t const uDR6 = X86_DR6_INIT_VAL 14773 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 14774 | X86_DR6_BD | X86_DR6_BS)); 14774 14775 14775 14776 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; … … 14781 14782 * The exception was for the guest. Update DR6, DR7.GD and 14782 14783 * IA32_DEBUGCTL.LBR before forwarding it. 14783 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)14784 * See Intel spec. 27.1 "Architectural State before a VM-Exit". 14784 14785 */ 14785 14786 VMMRZCallRing3Disable(pVCpu);
Note: See TracChangeset for help on using the changeset viewer.