VirtualBox

Changeset 72596 in vbox for trunk


Timestamp: Jun 18, 2018 12:51:15 PM (7 years ago)
Author:    vboxsync
Message:   EM,HM: Removed EMInterpretRdtsc and EMInterpretRdtscp. bugref:6973
Location:  trunk
Files:     4 edited

  • trunk/include/VBox/vmm/em.h

r72590 → r72596

 #endif
 VMM_INT_DECL(int)               EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
-#if 1 /** @todo Remove after testing and enabling @bugref{6973}. */
-VMM_INT_DECL(int)               EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
-VMM_INT_DECL(int)               EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
-#endif
 VMM_INT_DECL(int)               EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
 VMM_INT_DECL(VBOXSTRICTRC)      EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC);
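
With these prototypes gone from em.h, ring-0 code that needs to emulate a guest RDTSC or RDTSCP goes through IEM's decoded-instruction API instead, as the HM changes below show. A minimal sketch of the new call shape, using only what is visible in this changeset (pVCpu plus the instruction length; the 2-byte fallback matches the length passed by the SVM handler below):

    /* Old (removed):  int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));   */
    /* New (sketch):   hand the already-decoded instruction to IEM.                 */
    VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, 2 /* cbInstr */);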
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

r72590 → r72596

 }

-
-#if 1 /** @todo Remove after testing and enabling @bugref{6973}. */
-
-/**
- * Interpret RDTSC.
- *
- * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pRegFrame   The register frame.
- *
- */
-VMM_INT_DECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
-{
-    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
-    unsigned uCR4 = CPUMGetGuestCR4(pVCpu);
-
-    if (uCR4 & X86_CR4_TSD)
-        return VERR_EM_INTERPRETER; /* genuine #GP */
-
-    uint64_t uTicks = TMCpuTickGet(pVCpu);
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-    uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
-#endif
-
-    /* Same behaviour in 32 & 64 bits mode */
-    pRegFrame->rax = RT_LO_U32(uTicks);
-    pRegFrame->rdx = RT_HI_U32(uTicks);
-#ifdef VBOX_COMPARE_IEM_AND_EM
-    g_fIgnoreRaxRdx = true;
-#endif
-
-    NOREF(pVM);
-    return VINF_SUCCESS;
-}
-
-/**
- * Interpret RDTSCP.
- *
- * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        The CPU context.
- *
- */
-VMM_INT_DECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    Assert(pCtx == CPUMQueryGuestCtxPtr(pVCpu));
-    uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
-
-    if (!pVM->cpum.ro.GuestFeatures.fRdTscP)
-    {
-        AssertFailed();
-        return VERR_EM_INTERPRETER; /* genuine #UD */
-    }
-
-    if (uCR4 & X86_CR4_TSD)
-        return VERR_EM_INTERPRETER; /* genuine #GP */
-
-    uint64_t uTicks = TMCpuTickGet(pVCpu);
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-    uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
-#endif
-
-    /* Same behaviour in 32 & 64 bits mode */
-    pCtx->rax = RT_LO_U32(uTicks);
-    pCtx->rdx = RT_HI_U32(uTicks);
-#ifdef VBOX_COMPARE_IEM_AND_EM
-    g_fIgnoreRaxRdx = true;
-#endif
-    /* Low dword of the TSC_AUX msr only. */
-    VBOXSTRICTRC rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx); Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
-    pCtx->rcx &= UINT32_C(0xffffffff);
-
-    return VINF_SUCCESS;
-}
-
-#endif /* Trying to use IEM APIs instead. */

 /**
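
For reference, the guest-visible effect the removed interpreters implemented is small: read the (possibly nested-guest-offset) TSC, split the 64-bit value across EDX:EAX, and for RDTSCP additionally put the low dword of the TSC_AUX MSR into ECX. A standalone sketch of that register split in plain C (not VBox code; the struct name and the sample values are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical container for what RDTSCP leaves in the guest registers. */
    typedef struct { uint32_t eax, edx, ecx; } rdtscp_result_t;

    static rdtscp_result_t split_tsc(uint64_t uTicks, uint64_t uTscAux)
    {
        rdtscp_result_t r;
        r.eax = (uint32_t)(uTicks & UINT32_C(0xffffffff));  /* low half,  cf. RT_LO_U32 */
        r.edx = (uint32_t)(uTicks >> 32);                   /* high half, cf. RT_HI_U32 */
        r.ecx = (uint32_t)(uTscAux & UINT32_C(0xffffffff)); /* low dword of TSC_AUX only */
        return r;
    }

    int main(void)
    {
        rdtscp_result_t r = split_tsc(UINT64_C(0x123456789abcdef0), UINT64_C(0x2a));
        printf("edx:eax = %08x:%08x  ecx = %08x\n",
               (unsigned)r.edx, (unsigned)r.eax, (unsigned)r.ecx);
        return 0;
    }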
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

r72595 → r72596

 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-#if 1 /** @todo Needs testing. @bugref{6973} */
     VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 2));
     if (rcStrict == VINF_SUCCESS)
…
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
     return VBOXSTRICTRC_TODO(rcStrict);
-#else
-    int rc = EMInterpretRdtsc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
-    if (RT_LIKELY(rc == VINF_SUCCESS))
-    {
-        pSvmTransient->fUpdateTscOffsetting = true;
-        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
-    }
-    else
-    {
-        AssertMsgFailed(("hmR0SvmExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
-        rc = VERR_EM_INTERPRETER;
-    }
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
-    return rc;
-#endif
 }

…
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-#if 1 /** @todo Needs testing. @bugref{6973} */
     VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 2));
     if (rcStrict == VINF_SUCCESS)
…
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
     return VBOXSTRICTRC_TODO(rcStrict);
-#else
-    int rc = EMInterpretRdtscp(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
-    if (RT_LIKELY(rc == VINF_SUCCESS))
-    {
-        pSvmTransient->fUpdateTscOffsetting = true;
-        hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3);
-        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
-    }
-    else
-    {
-        AssertMsgFailed(("hmR0SvmExitRdtsc: EMInterpretRdtscp failed with %Rrc\n", rc));
-        rc = VERR_EM_INTERPRETER;
-    }
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
-    return rc;
-#endif
 }
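
With the #else branches gone, the SVM exit handlers reduce to the IEM call path shown in the context lines above. A rough sketch of the RDTSC handler after this change; the signature and the success-path body are assumptions (the changeset collapses those lines), while the calls themselves are taken from the hunk:

    /* Sketch only, not the exact trunk code. */
    static int hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    {
        HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
        /* Let IEM emulate the instruction; the helper falls back to a 2-byte length. */
        VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 2));
        if (rcStrict == VINF_SUCCESS)
        {
            /* Assumption: on success the handler still requests a TSC-offsetting refresh,
               as the removed EMInterpretRdtsc branch did. */
            pSvmTransient->fUpdateTscOffsetting = true;
        }
        /* (Collapsed in the changeset: handling of other strict status codes.) */
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
        return VBOXSTRICTRC_TODO(rcStrict);
    }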
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

r72593 → r72596

 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
-#if 1 /** @todo Needs testing. @bugref{6973} */
     int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);      /* Needed for CPL < 0 only, really. */
     rc    |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
…
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
     return rcStrict;
-#else
-    int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
-
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
-    if (RT_LIKELY(rc == VINF_SUCCESS))
-    {
-        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
-        Assert(pVmxTransient->cbInstr == 2);
-        /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
-        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
-            pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
-    }
-    else
-        rc = VERR_EM_INTERPRETER;
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
-    return rc;
-#endif
 }

…
 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
-#if 1 /** @todo Needs testing. @bugref{6973} */
     int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);      /* Needed for CPL < 0 only, really. */
     rc    |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
…
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
     return rcStrict;
-#else
-    int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
-    rc    |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);  /* For MSR_K8_TSC_AUX */
-    AssertRCReturn(rc, rc);
-
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
-    if (RT_SUCCESS(rc))
-    {
-        rc  = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
-        Assert(pVmxTransient->cbInstr == 3);
-        /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
-        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
-            pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
-    }
-    else
-    {
-        AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
-        rc = VERR_EM_INTERPRETER;
-    }
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
-    return rc;
-#endif
 }
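
The VMX handlers follow the same pattern, with one extra step visible in the context lines: the guest state IEM needs (CR4 and the general registers) is synced from the VMCS first. A rough sketch of the RDTSCP handler after this change; the signature, the cbInstr source, and the success-path handling are not visible in the hunk and are assumptions:

    /* Sketch only, not the exact trunk code. */
    static VBOXSTRICTRC hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    {
        HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
        int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);      /* Needed for CPL < 0 only, really. */
        rc    |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
        AssertRCReturn(rc, rc);

        /* Assumption: the instruction length comes from the VM-exit instruction-length field,
           as the removed branch's Assert(pVmxTransient->cbInstr == 3) suggests. */
        VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr);
        /* (Collapsed in the changeset: on success, TSC offsetting is presumably re-armed for
           VM-reentry when VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING is set, cf. @bugref{6634}.) */

        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
        return rcStrict;
    }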