VirtualBox

Changeset 30861 in vbox


Timestamp:
Jul 15, 2010 6:09:29 PM
Author:
vboxsync
Message:

VMM,REM: Replumbed the MSR updating and reading so that PGM can easily be notified when EFER.NXE changes.

Location:
trunk/src/VBox/VMM
Files:
4 edited
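
In short, the changeset replaces the value-returning MSR accessors with status-code variants, so an invalid MSR can be reported to the caller instead of merely asserting, and gives PGM a notification hook. A condensed view of the interface change, with the signatures as they appear in the diffs below:

    /* Before (r30493): errors could only assert, never propagate. */
    VMMDECL(uint64_t) CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr);
    VMMDECL(void)     CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr);

    /* After (r30861): return VINF_SUCCESS or VERR_CPUM_RAISE_GP_0, plus a
       hook so PGM learns about EFER.NXE transitions. */
    VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue);
    VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue);
    VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe);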

  • trunk/src/VBox/VMM/CPUM.cpp

    r30493 r30861  
    842842    if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
    843843    {
    844         /* Only expose the virtual and physical address sizes to the guest. (EAX completely) */
     844        /* Only expose the virtual and physical address sizes to the guest. */
    845845        pCPUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);
    846846        pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
     
    22832283        return VERR_INTERNAL_ERROR_2;
    22842284    }
     2285
     2286    /* Notify PGM of the NXE states in case they've changed. */
     2287    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
     2288        PGMNotifyNxeChanged(&pVM->aCpus[iCpu], !!(pVM->aCpus[iCpu].cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
    22852289    return VINF_SUCCESS;
    22862290}
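
    The second CPUM.cpp hunk runs after a saved state has been loaded; condensed here with the double-negation idiom spelled out (a restated sketch, not additional changeset code):

        /* Re-derive each VCPU's EFER.NXE for PGM after restoring CPUM state;
           '!!' folds the 64-bit mask test down to a clean bool. */
        for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
        {
            bool const fNxe = !!(pVM->aCpus[iCpu].cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
            PGMNotifyNxeChanged(&pVM->aCpus[iCpu], fNxe);
        }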
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r30263 r30861  
    2424#include <VBox/patm.h>
    2525#include <VBox/dbgf.h>
     26#include <VBox/pdm.h>
     27#include <VBox/pgm.h>
    2628#include <VBox/mm.h>
    2729#include "CPUMInternal.h"
     
    730732
    731733
    732 VMMDECL(uint64_t)  CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
    733 {
    734     uint64_t u64 = 0;
    735     uint8_t  u8Multiplier = 4;
    736 
     734/**
     735 * Query an MSR.
     736 *
     737 * The caller is responsible for checking privilege if the call is the result
     738 * of a RDMSR instruction.  We'll do the rest.
     739 *
     740 * @retval  VINF_SUCCESS on success.
     741 * @retval  VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
     742 *          expected to take the appropriate actions. @a *puValue is set to 0.
     743 * @param   pVCpu               The virtual CPU to operate on.
     744 * @param   idMsr               The MSR.
     745 * @param   puValue             Where to return the value.
     746 *
     747 * @remarks This will always return the right values, even when we're in the
     748 *          recompiler.
     749 */
     750VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
     751{
     752    /*
     753     * If we don't indicate MSR support in the CPUID feature bits, indicate
     754     * that a #GP(0) should be raised.
     755     */
     756    if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
     757    {
     758        *puValue = 0;
     759        return VERR_CPUM_RAISE_GP_0;
     760    }
     761
     762    int rc = VINF_SUCCESS;
     763    uint8_t const u8Multiplier = 4;
    737764    switch (idMsr)
    738765    {
    739766        case MSR_IA32_TSC:
    740             u64 = TMCpuTickGet(pVCpu);
     767            *puValue = TMCpuTickGet(pVCpu);
     768            break;
     769
     770        case MSR_IA32_APICBASE:
     771            rc = PDMApicGetBase(pVCpu->CTX_SUFF(pVM), puValue);
     772            if (RT_SUCCESS(rc))
     773                rc = VINF_SUCCESS;
     774            else
     775            {
     776                *puValue = 0;
     777                rc = VERR_CPUM_RAISE_GP_0;
     778            }
    741779            break;
    742780
    743781        case MSR_IA32_CR_PAT:
    744             u64 = pVCpu->cpum.s.Guest.msrPAT;
     782            *puValue = pVCpu->cpum.s.Guest.msrPAT;
    745783            break;
    746784
    747785        case MSR_IA32_SYSENTER_CS:
    748             u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
     786            *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
    749787            break;
    750788
    751789        case MSR_IA32_SYSENTER_EIP:
    752             u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
     790            *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
    753791            break;
    754792
    755793        case MSR_IA32_SYSENTER_ESP:
    756             u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
     794            *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
    757795            break;
    758796
    759797        case MSR_K6_EFER:
    760             u64 = pVCpu->cpum.s.Guest.msrEFER;
     798            *puValue = pVCpu->cpum.s.Guest.msrEFER;
    761799            break;
    762800
    763801        case MSR_K8_SF_MASK:
    764             u64 = pVCpu->cpum.s.Guest.msrSFMASK;
     802            *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
    765803            break;
    766804
    767805        case MSR_K6_STAR:
    768             u64 = pVCpu->cpum.s.Guest.msrSTAR;
     806            *puValue = pVCpu->cpum.s.Guest.msrSTAR;
    769807            break;
    770808
    771809        case MSR_K8_LSTAR:
    772             u64 = pVCpu->cpum.s.Guest.msrLSTAR;
     810            *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
    773811            break;
    774812
    775813        case MSR_K8_CSTAR:
    776             u64 = pVCpu->cpum.s.Guest.msrCSTAR;
     814            *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
     815            break;
     816
     817        case MSR_K8_FS_BASE:
     818            *puValue = pVCpu->cpum.s.Guest.fsHid.u64Base;
     819            break;
     820
     821        case MSR_K8_GS_BASE:
     822            *puValue = pVCpu->cpum.s.Guest.gsHid.u64Base;
    777823            break;
    778824
    779825        case MSR_K8_KERNEL_GS_BASE:
    780             u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
     826            *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
    781827            break;
    782828
    783829        case MSR_K8_TSC_AUX:
    784             u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
     830            *puValue = pVCpu->cpum.s.GuestMsr.msr.tscAux;
    785831            break;
    786832
    787833        case MSR_IA32_PERF_STATUS:
    788             /** @todo: could really be not exactly correct, maybe use host's values */
    789             /* Keep consistent with helper_rdmsr() in REM */
    790             u64 =     (1000ULL                      /* TSC increment by tick */)
    791                     | ((uint64_t)u8Multiplier << 24 /* CPU multiplier (aka bus ratio) min */       )
    792                     | ((uint64_t)u8Multiplier << 40 /* CPU multiplier (aka bus ratio) max */       );
    793             break;
    794 
    795         case  MSR_IA32_FSB_CLOCK_STS:
    796             /**
      834            /** @todo this may not be exactly correct; maybe use the host's values */
     835            *puValue = UINT64_C(1000)                 /* TSC increment by tick */
     836                     | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
     837                     | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
     838            break;
     839
     840        case MSR_IA32_FSB_CLOCK_STS:
     841            /*
    797842             * Encoded as:
    798843             * 0 - 266
     
    802847             * 5 - return 100
    803848             */
    804             u64 = (2 << 4);
     849            *puValue = (2 << 4);
    805850            break;
    806851
    807852        case MSR_IA32_PLATFORM_INFO:
    808             u64 =     ((u8Multiplier)<<8              /* Flex ratio max */)
    809                     | ((uint64_t)u8Multiplier << 40   /* Flex ratio min */ );
     853            *puValue = (u8Multiplier << 8)            /* Flex ratio max */
     854                     | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
    810855            break;
    811856
    812857        case MSR_IA32_THERM_STATUS:
    813858            /* CPU temperature relative to TCC; to actually activate, CPUID leaf 6 EAX[0] must be set */
    814             u64 = (1 << 31) /* validity bit */ |
    815                   (20 << 16) /* degrees till TCC */;
     859            *puValue = ( 1 << 31) /* validity bit */
     860                     | (20 << 16) /* degrees till TCC */;
    816861            break;
    817862
     
    819864#if 0
    820865            /* Needs to be tested more before enabling. */
    821             u64 = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
     866            *puValue = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
    822867#else
    823             u64 = 0;
     868            *puValue = 0;
    824869#endif
    825870            break;
    826871
    827         /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
     872#if 0 /*def IN_RING0 */
     873        case MSR_IA32_PLATFORM_ID:
     874        case MSR_IA32_BIOS_SIGN_ID:
     875            if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
     876            {
     877                /* Available since the P6 family. VT-x implies that this feature is present. */
     878                if (idMsr == MSR_IA32_PLATFORM_ID)
     879                    *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
     880                else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
     881                    *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
     882                break;
     883            }
     884            /* no break */
     885#endif
     886
    828887        default:
    829             AssertFailed();
      888            /* In the x2APIC specification this range is reserved for APIC control. */
     889            if (    idMsr >= MSR_IA32_APIC_START
     890                &&  idMsr <  MSR_IA32_APIC_END)
     891            {
     892                rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
     893                if (RT_SUCCESS(rc))
     894                    rc = VINF_SUCCESS;
     895                else
     896                {
     897                    *puValue = 0;
     898                    rc = VERR_CPUM_RAISE_GP_0;
     899                }
     900            }
     901            else
     902            {
     903                *puValue = 0;
     904                rc = VERR_CPUM_RAISE_GP_0;
     905            }
    830906            break;
    831907    }
    832     return u64;
    833 }
    834 
    835 VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
    836 {
    837     /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
     908
     909    return rc;
     910}
     911
     912
     913/**
     914 * Sets the MSR.
     915 *
     916 * The caller is responsible for checking privilege if the call is the result
     917 * of a WRMSR instruction.  We'll do the rest.
     918 *
     919 * @retval  VINF_SUCCESS on success.
     920 * @retval  VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
     921 *          appropriate actions.
     922 *
     923 * @param   pVCpu       The virtual CPU to operate on.
     924 * @param   idMsr       The MSR id.
     925 * @param   uValue      The value to set.
     926 *
     927 * @remarks Everyone changing MSR values, including the recompiler, shall do it
     928 *          by calling this method.  This makes sure we have current values and
     929 *          that we trigger all the right actions when something changes.
     930 */
     931VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
     932{
     933    /*
     934     * If we don't indicate MSR support in the CPUID feature bits, indicate
     935     * that a #GP(0) should be raised.
     936     */
     937    if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
     938        return VERR_CPUM_RAISE_GP_0;
     939
     940    int rc = VINF_SUCCESS;
    838941    switch (idMsr)
    839942    {
     943        case MSR_IA32_MISC_ENABLE:
     944            pVCpu->cpum.s.GuestMsr.msr.miscEnable = uValue;
     945            break;
     946
     947        case MSR_IA32_TSC:
     948            TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
     949            break;
     950
     951        case MSR_IA32_APICBASE:
     952            rc = PDMApicSetBase(pVCpu->CTX_SUFF(pVM), uValue);
     953            if (rc != VINF_SUCCESS)
     954                rc = VERR_CPUM_RAISE_GP_0;
     955            break;
     956
     957        case MSR_IA32_CR_PAT:
     958            pVCpu->cpum.s.Guest.msrPAT      = uValue;
     959            break;
     960
     961        case MSR_IA32_SYSENTER_CS:
     962            pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
     963            break;
     964
     965        case MSR_IA32_SYSENTER_EIP:
     966            pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
     967            break;
     968
     969        case MSR_IA32_SYSENTER_ESP:
     970            pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
     971            break;
     972
     973        case MSR_K6_EFER:
     974        {
     975            PVM             pVM          = pVCpu->CTX_SUFF(pVM);
     976            uint64_t const  uOldEFER     = pVCpu->cpum.s.Guest.msrEFER;
     977            uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
     978                                         ? pVM->cpum.s.aGuestCpuIdExt[1].edx
     979                                         : 0;
     980            uint64_t        fMask        = 0;
     981
     982            /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
     983            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_NX)
     984                fMask |= MSR_K6_EFER_NXE;
     985            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
     986                fMask |= MSR_K6_EFER_LME;
     987            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_SEP)
     988                fMask |= MSR_K6_EFER_SCE;
     989            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
     990                fMask |= MSR_K6_EFER_FFXSR;
     991
     992            /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
     993               paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
     994            if (    (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
     995                &&  (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
     996            {
     997                Log(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
     998                return VERR_CPUM_RAISE_GP_0;
     999            }
     1000
     1001            /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
     1002            AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
     1003                      ("Unexpected value %RX64\n", uValue));
     1004            pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
     1005
     1006            /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
     1007               if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
     1008            if (   (uValue                      & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
     1009                != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
     1010            {
     1011                /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
     1012                HWACCMFlushTLB(pVCpu);
     1013
     1014                /* Notify PGM about NXE changes. */
     1015                if (   (uValue        & MSR_K6_EFER_NXE)
     1016                    != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
     1017                    PGMNotifyNxeChanged(pVCpu, !!(uValue & MSR_K6_EFER_NXE));
     1018            }
     1019            break;
     1020        }
     1021
     1022        case MSR_K8_SF_MASK:
     1023            pVCpu->cpum.s.Guest.msrSFMASK       = uValue;
     1024            break;
     1025
     1026        case MSR_K6_STAR:
     1027            pVCpu->cpum.s.Guest.msrSTAR         = uValue;
     1028            break;
     1029
     1030        case MSR_K8_LSTAR:
     1031            pVCpu->cpum.s.Guest.msrLSTAR        = uValue;
     1032            break;
     1033
     1034        case MSR_K8_CSTAR:
     1035            pVCpu->cpum.s.Guest.msrCSTAR        = uValue;
     1036            break;
     1037
     1038        case MSR_K8_FS_BASE:
     1039            pVCpu->cpum.s.Guest.fsHid.u64Base   = uValue;
     1040            break;
     1041
     1042        case MSR_K8_GS_BASE:
     1043            pVCpu->cpum.s.Guest.gsHid.u64Base   = uValue;
     1044            break;
     1045
     1046        case MSR_K8_KERNEL_GS_BASE:
     1047            pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
     1048            break;
     1049
    8401050        case MSR_K8_TSC_AUX:
    841             pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
    842             break;
    843 
    844         case MSR_IA32_MISC_ENABLE:
    845             pVCpu->cpum.s.GuestMsr.msr.miscEnable = valMsr;
     1051            pVCpu->cpum.s.GuestMsr.msr.tscAux   = uValue;
    8461052            break;
    8471053
    8481054        default:
    849             AssertFailed();
      1055            /* In the x2APIC specification this range is reserved for APIC control. */
     1056            if (    idMsr >= MSR_IA32_APIC_START
     1057                &&  idMsr <  MSR_IA32_APIC_END)
     1058            {
     1059                rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
     1060                if (rc != VINF_SUCCESS)
     1061                    rc = VERR_CPUM_RAISE_GP_0;
     1062            }
     1063            else
     1064            {
     1065                /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
     1066                /** @todo rc = VERR_CPUM_RAISE_GP_0 */
     1067                Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
     1068            }
    8501069            break;
    8511070    }
    852 }
     1071    return rc;
     1072}
     1073
    8531074
    8541075VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
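
    The new contract pushes #GP(0) injection out to the caller: CPUM only reports the condition. A minimal sketch of a consumer, assuming the usual VBox/VMM headers; emulateRdmsr is a hypothetical helper, not part of this changeset:

        #include <VBox/cpum.h>   /* CPUMQueryGuestMsr, VERR_CPUM_RAISE_GP_0 */
        #include <VBox/err.h>

        /* Hypothetical RDMSR path: on failure the caller decides how to raise
           the fault (inject #GP(0), fall back to another interpreter, ...). */
        static int emulateRdmsr(PVMCPU pVCpu, uint32_t idMsr, uint32_t *pEax, uint32_t *pEdx)
        {
            uint64_t uValue;
            int rc = CPUMQueryGuestMsr(pVCpu, idMsr, &uValue);
            if (rc != VINF_SUCCESS)
                return rc;                      /* VERR_CPUM_RAISE_GP_0 */
            *pEax = (uint32_t)uValue;           /* RDMSR returns the value in EDX:EAX */
            *pEdx = (uint32_t)(uValue >> 32);
            return VINF_SUCCESS;
        }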
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    r30567 r30861  
    24962496    pCtx->rdx = (uTicks >> 32ULL);
    24972497    /* Low dword of the TSC_AUX msr only. */
    2498     pCtx->rcx = (uint32_t)CPUMGetGuestMsr(pVCpu, MSR_K8_TSC_AUX);
     2498    CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
     2499    pCtx->rcx &= UINT32_C(0xffffffff);
    24992500
    25002501    return VINF_SUCCESS;
     
    27372738 * @param   pVCpu       The VMCPU handle.
    27382739 * @param   pRegFrame   The register frame.
    2739  *
    27402740 */
    27412741VMMDECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
    27422742{
    2743     uint32_t u32Dummy, u32Features, cpl;
    2744     uint64_t val;
    2745     CPUMCTX *pCtx;
    2746     int      rc = VINF_SUCCESS;
    2747 
    27482743    /** @todo According to the Intel manuals, there's a REX version of RDMSR that is slightly different.
    27492744     *  That version clears the high dwords of both RDX & RAX */
    2750     pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    27512745
    27522746    /* Get the current privilege level. */
    2753     cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
    2754     if (cpl != 0)
     2747    if (CPUMGetGuestCPL(pVCpu, pRegFrame) != 0)
    27552748        return VERR_EM_INTERPRETER; /* supervisor only */
    27562749
    2757     CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
    2758     if (!(u32Features & X86_CPUID_FEATURE_EDX_MSR))
    2759         return VERR_EM_INTERPRETER; /* not supported */
    2760 
    2761     switch (pRegFrame->ecx)
    2762     {
    2763     case MSR_IA32_TSC:
    2764         val = TMCpuTickGet(pVCpu);
    2765         break;
    2766 
    2767     case MSR_IA32_APICBASE:
    2768         rc = PDMApicGetBase(pVM, &val);
    2769         AssertRC(rc);
    2770         break;
    2771 
    2772     case MSR_IA32_CR_PAT:
    2773         val = pCtx->msrPAT;
    2774         break;
    2775 
    2776     case MSR_IA32_SYSENTER_CS:
    2777         val = pCtx->SysEnter.cs;
    2778         break;
    2779 
    2780     case MSR_IA32_SYSENTER_EIP:
    2781         val = pCtx->SysEnter.eip;
    2782         break;
    2783 
    2784     case MSR_IA32_SYSENTER_ESP:
    2785         val = pCtx->SysEnter.esp;
    2786         break;
    2787 
    2788     case MSR_K6_EFER:
    2789         val = pCtx->msrEFER;
    2790         break;
    2791 
    2792     case MSR_K8_SF_MASK:
    2793         val = pCtx->msrSFMASK;
    2794         break;
    2795 
    2796     case MSR_K6_STAR:
    2797         val = pCtx->msrSTAR;
    2798         break;
    2799 
    2800     case MSR_K8_LSTAR:
    2801         val = pCtx->msrLSTAR;
    2802         break;
    2803 
    2804     case MSR_K8_CSTAR:
    2805         val = pCtx->msrCSTAR;
    2806         break;
    2807 
    2808     case MSR_K8_FS_BASE:
    2809         val = pCtx->fsHid.u64Base;
    2810         break;
    2811 
    2812     case MSR_K8_GS_BASE:
    2813         val = pCtx->gsHid.u64Base;
    2814         break;
    2815 
    2816     case MSR_K8_KERNEL_GS_BASE:
    2817         val = pCtx->msrKERNELGSBASE;
    2818         break;
    2819 
    2820     case MSR_K8_TSC_AUX:
    2821         val = CPUMGetGuestMsr(pVCpu, MSR_K8_TSC_AUX);
    2822         break;
    2823 
    2824     case MSR_IA32_PERF_STATUS:
    2825     case MSR_IA32_PLATFORM_INFO:
    2826     case MSR_IA32_MISC_ENABLE:
    2827     case MSR_IA32_FSB_CLOCK_STS:
    2828     case MSR_IA32_THERM_STATUS:
    2829         val = CPUMGetGuestMsr(pVCpu, pRegFrame->ecx);
    2830         break;
    2831 
    2832 #if 0 /*def IN_RING0 */
    2833     case MSR_IA32_PLATFORM_ID:
    2834     case MSR_IA32_BIOS_SIGN_ID:
    2835         if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
    2836         {
    2837             /* Available since the P6 family. VT-x implies that this feature is present. */
    2838             if (pRegFrame->ecx == MSR_IA32_PLATFORM_ID)
    2839                 val = ASMRdMsr(MSR_IA32_PLATFORM_ID);
    2840             else
    2841             if (pRegFrame->ecx == MSR_IA32_BIOS_SIGN_ID)
    2842                 val = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
    2843             break;
    2844         }
    2845         /* no break */
    2846 #endif
    2847     default:
    2848         /* In X2APIC specification this range is reserved for APIC control. */
    2849         if (    pRegFrame->ecx >= MSR_IA32_APIC_START
    2850             &&  pRegFrame->ecx <  MSR_IA32_APIC_END)
    2851             rc = PDMApicReadMSR(pVM, pVCpu->idCpu, pRegFrame->ecx, &val);
    2852         else
    2853             /* We should actually trigger a #GP here, but don't as that will cause more trouble. */
    2854             val = 0;
    2855         break;
    2856     }
    2857     LogFlow(("EMInterpretRdmsr %s (%x) -> val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, val));
    2858     if (rc == VINF_SUCCESS)
    2859     {
    2860         pRegFrame->rax = (uint32_t) val;
    2861         pRegFrame->rdx = (uint32_t)(val >> 32);
    2862     }
     2750    uint64_t uValue;
     2751    int rc = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
     2752    if (RT_UNLIKELY(rc != VINF_SUCCESS))
     2753    {
     2754        Assert(rc == VERR_CPUM_RAISE_GP_0);
     2755        return VERR_EM_INTERPRETER;
     2756    }
     2757    pRegFrame->rax = (uint32_t) uValue;
     2758    pRegFrame->rdx = (uint32_t)(uValue >> 32);
     2759    LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
    28632760    return rc;
    28642761}
     
    28702767static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
    28712768{
    2872     /* Note: the Intel manual claims there's a REX version of RDMSR that's slightly different, so we play safe by completely disassembling the instruction. */
     2769    /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
     2770             different, so we play safe by completely disassembling the instruction. */
    28732771    Assert(!(pDis->prefix & PREFIX_REX));
    28742772    return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
     
    28862784VMMDECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
    28872785{
    2888     uint32_t u32Dummy, u32Features, cpl;
    2889     uint64_t val;
    2890     CPUMCTX *pCtx;
    2891 
    2892     /* Note: works the same in 32 and 64 bits modes. */
    2893     pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    2894 
    2895     /* Get the current privilege level. */
    2896     cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
    2897     if (cpl != 0)
    2898         return VERR_EM_INTERPRETER; /* supervisor only */
    2899 
    2900     CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
    2901     if (!(u32Features & X86_CPUID_FEATURE_EDX_MSR))
    2902         return VERR_EM_INTERPRETER; /* not supported */
    2903 
    2904     val = RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx);
    2905     LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, val));
    2906     switch (pRegFrame->ecx)
    2907     {
    2908     case MSR_IA32_TSC:
    2909         TMCpuTickSet(pVM, pVCpu, val);
    2910         break;
    2911 
    2912     case MSR_IA32_APICBASE:
    2913     {
    2914         int rc = PDMApicSetBase(pVM, val);
    2915         AssertRC(rc);
    2916         break;
    2917     }
    2918 
    2919     case MSR_IA32_CR_PAT:
    2920         pCtx->msrPAT = val;
    2921         break;
    2922 
    2923     case MSR_IA32_SYSENTER_CS:
    2924         pCtx->SysEnter.cs = val & 0xffff; /* 16 bits selector */
    2925         break;
    2926 
    2927     case MSR_IA32_SYSENTER_EIP:
    2928         pCtx->SysEnter.eip = val;
    2929         break;
    2930 
    2931     case MSR_IA32_SYSENTER_ESP:
    2932         pCtx->SysEnter.esp = val;
    2933         break;
    2934 
    2935     case MSR_K6_EFER:
    2936     {
    2937         uint64_t uMask = 0;
    2938         uint64_t oldval = pCtx->msrEFER;
    2939 
    2940         /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
    2941         CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
    2942         if (u32Features & X86_CPUID_AMD_FEATURE_EDX_NX)
    2943             uMask |= MSR_K6_EFER_NXE;
    2944         if (u32Features & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
    2945             uMask |= MSR_K6_EFER_LME;
    2946         if (u32Features & X86_CPUID_AMD_FEATURE_EDX_SEP)
    2947             uMask |= MSR_K6_EFER_SCE;
    2948         if (u32Features & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
    2949             uMask |= MSR_K6_EFER_FFXSR;
    2950 
    2951         /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
    2952         if (    ((pCtx->msrEFER & MSR_K6_EFER_LME) != (val & uMask & MSR_K6_EFER_LME))
    2953             &&  (pCtx->cr0 & X86_CR0_PG))
    2954         {
    2955             AssertMsgFailed(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
    2956             return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */
    2957         }
    2958 
    2959         /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
    2960         AssertMsg(!(val & ~(MSR_K6_EFER_NXE|MSR_K6_EFER_LME|MSR_K6_EFER_LMA /* ignored anyway */ |MSR_K6_EFER_SCE|MSR_K6_EFER_FFXSR)), ("Unexpected value %RX64\n", val));
    2961         pCtx->msrEFER = (pCtx->msrEFER & ~uMask) | (val & uMask);
    2962 
    2963         /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
    2964         if ((oldval & (MSR_K6_EFER_NXE|MSR_K6_EFER_LME|MSR_K6_EFER_LMA)) != (pCtx->msrEFER & (MSR_K6_EFER_NXE|MSR_K6_EFER_LME|MSR_K6_EFER_LMA)))
    2965             HWACCMFlushTLB(pVCpu);
    2966 
    2967         break;
    2968     }
    2969 
    2970     case MSR_K8_SF_MASK:
    2971         pCtx->msrSFMASK = val;
    2972         break;
    2973 
    2974     case MSR_K6_STAR:
    2975         pCtx->msrSTAR = val;
    2976         break;
    2977 
    2978     case MSR_K8_LSTAR:
    2979         pCtx->msrLSTAR = val;
    2980         break;
    2981 
    2982     case MSR_K8_CSTAR:
    2983         pCtx->msrCSTAR = val;
    2984         break;
    2985 
    2986     case MSR_K8_FS_BASE:
    2987         pCtx->fsHid.u64Base = val;
    2988         break;
    2989 
    2990     case MSR_K8_GS_BASE:
    2991         pCtx->gsHid.u64Base = val;
    2992         break;
    2993 
    2994     case MSR_K8_KERNEL_GS_BASE:
    2995         pCtx->msrKERNELGSBASE = val;
    2996         break;
    2997 
    2998     case MSR_K8_TSC_AUX:
    2999     case MSR_IA32_MISC_ENABLE:
    3000         CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, val);
    3001         break;
    3002 
    3003     default:
    3004         /* In X2APIC specification this range is reserved for APIC control. */
    3005         if (    pRegFrame->ecx >= MSR_IA32_APIC_START
    3006             &&  pRegFrame->ecx <  MSR_IA32_APIC_END)
    3007             return PDMApicWriteMSR(pVM, pVCpu->idCpu, pRegFrame->ecx, val);
    3008 
    3009         /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
    3010         break;
    3011     }
    3012     return VINF_SUCCESS;
     2786    /* Check the current privilege level, this instruction is supervisor only. */
     2787    if (CPUMGetGuestCPL(pVCpu, pRegFrame) != 0)
     2788        return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
     2789
     2790    int rc = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
     2791    if (rc != VINF_SUCCESS)
     2792    {
     2793        Assert(rc == VERR_CPUM_RAISE_GP_0);
     2794        return VERR_EM_INTERPRETER;
     2795    }
     2796    LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
     2797             RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
     2798    return rc;
    30132799}
    30142800
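    With the big switch bodies gone, both interpreters reduce to a privilege check plus a single CPUM call. One subtlety in the RDTSCP hunk above: the new API always returns the full 64-bit MSR, so the result is masked afterwards because RDTSCP defines RCX as the low dword of TSC_AUX only. Equivalent to the two added lines (a restated sketch):

        uint64_t uTscAux = 0;
        CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &uTscAux); /* MSR known to exist here */
        pCtx->rcx = (uint32_t)uTscAux;   /* keep the low dword, clear the high one */
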
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r30831 r30861  
    938938        if (HWACCMIsNestedPagingActive(pVM) || !CPUMIsGuestPagingEnabled(pVCpu))
    939939        {
    940             /* AMD-V nested paging or real/protected mode without paging */
     940            /* AMD-V nested paging or real/protected mode without paging. */
    941941            GCPdPt  = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
    942942            enmKind = PGMPOOLKIND_PAE_PD_PHYS;
     
    981981
    982982# if defined(IN_RC)
    983         /* In 32 bits PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
     983        /*
     984         * In 32 bits PAE mode we *must* invalidate the TLB when changing a
     985         * PDPT entry; the CPU fetches them only during cr3 load, so any
    984986         * non-present PDPT will continue to cause page faults.
    985987         */
     
    20992101        default:                return "unknown mode value";
    21002102    }
     2103}
     2104
     2105
     2106
     2107/**
     2108 * Notification from CPUM that the EFER.NXE bit has changed.
     2109 *
     2110 * @param   pVCpu       The virtual CPU for which EFER changed.
     2111 * @param   fNxe        The new NXE state.
     2112 */
     2113VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
     2114{
     2115    /* later */
    21012116}
    21022117
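
    PGMNotifyNxeChanged itself is deliberately left as a stub ("later"). One plausible completion, purely speculative and not part of this changeset, would cache the state so the page-table walkers know whether PTE bit 63 is the NX bit or a reserved, must-be-zero bit; fNoExecuteEnabled is an assumed field name:

        VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
        {
            /* Assumed field: with EFER.NXE set, PTE bit 63 means no-execute;
               with it clear, a set bit 63 is a reserved-bit page fault. */
            pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
        }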