VirtualBox

Changeset 78592 in vbox for trunk/src


Ignore:
Timestamp:
May 20, 2019 10:07:18 AM (6 years ago)
Author:
vboxsync
Message:

VMM/IEM: Nested VMX: bugref:9180 Fixed vmread/vmwrite instruction execution workers to not need enmAddrMode anymore (callers do the work). Fixed exit code for iemVmxVmexit(). Fixed updating VM-exit MSR store area on VM-exit. Fixed minor code issues, style and assertions.

Location:
trunk/src/VBox/VMM/VMMAll
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r78525 r78592  
    1598415984
    1598515985    VBOXSTRICTRC   rcStrict;
    15986     uint8_t const  cbInstr   = pExitInfo->cbInstr;
    15987     uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
     15986    uint8_t const  cbInstr       = pExitInfo->cbInstr;
     15987    bool const     fIs64BitMode  = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
     15988    uint64_t const u64FieldEnc   = fIs64BitMode
     15989                                 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
     15990                                 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
    1598815991    if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
    1598915992    {
    15990         if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
     15993        if (fIs64BitMode)
    1599115994        {
    1599215995            uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
    15993             rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
     15996            rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
    1599415997        }
    1599515998        else
    1599615999        {
    1599716000            uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
    15998             rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
     16001            rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
    1599916002        }
    1600016003    }
    1600116004    else
    1600216005    {
    16003         RTGCPTR GCPtrDst       = pExitInfo->GCPtrEffAddr;
    16004         uint8_t iEffSeg        = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
    16005         IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
    16006         rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
     16006        RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
     16007        uint8_t const iEffSeg  = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
     16008        rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
    1600716009    }
    1600816010    Assert(!pVCpu->iem.s.cActiveMappings);
     
    1602916031    uint64_t u64Val;
    1603016032    uint8_t  iEffSeg;
    16031     IEMMODE  enmEffAddrMode;
    1603216033    if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
    1603316034    {
    16034         u64Val         = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
    16035         iEffSeg        = UINT8_MAX;
    16036         enmEffAddrMode = UINT8_MAX;
     16035        u64Val  = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
     16036        iEffSeg = UINT8_MAX;
    1603716037    }
    1603816038    else
    1603916039    {
    16040         u64Val         = pExitInfo->GCPtrEffAddr;
    16041         iEffSeg        = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
    16042         enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
    16043     }
    16044     uint8_t const  cbInstr   = pExitInfo->cbInstr;
    16045     uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
    16046     VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
     16040        u64Val  = pExitInfo->GCPtrEffAddr;
     16041        iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
     16042    }
     16043    uint8_t const  cbInstr     = pExitInfo->cbInstr;
     16044    uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
     16045                               ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
     16046                               : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
     16047    VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
    1604716048    Assert(!pVCpu->iem.s.cActiveMappings);
    1604816049    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
     
    1621116212    uint8_t const  cbInstr          = pExitInfo->cbInstr;
    1621216213    RTGCPTR const  GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
    16213     uint64_t const uInvvpidType     = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
     16214    uint64_t const u64InvvpidType   = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
    1621416215                                    ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
    1621516216                                    : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
    16216     VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, uInvvpidType, pExitInfo);
     16217    VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
    1621716218    Assert(!pVCpu->iem.s.cActiveMappings);
    1621816219    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r78525 r78592  
    19041904        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
    19051905
    1906     PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea);
     1906    /*
     1907     * Optimization if the guest hypervisor is using the same guest-physical page for both
     1908     * the VM-entry MSR-load area as well as the VM-exit MSR store area.
     1909     */
     1910    PVMXAUTOMSR    pMsrArea;
     1911    RTGCPHYS const GCPhysVmEntryMsrLoadArea = pVmcs->u64AddrEntryMsrLoad.u;
     1912    RTGCPHYS const GCPhysVmExitMsrStoreArea = pVmcs->u64AddrExitMsrStore.u;
     1913    if (GCPhysVmEntryMsrLoadArea == GCPhysVmExitMsrStoreArea)
     1914        pMsrArea = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea);
     1915    else
     1916    {
     1917        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea),
     1918                                         GCPhysVmExitMsrStoreArea, cMsrs * sizeof(VMXAUTOMSR));
     1919        if (RT_SUCCESS(rc))
     1920            pMsrArea = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea);
     1921        else
     1922        {
     1923            AssertMsgFailed(("VM-exit: Failed to read MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
     1924            IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrReadPhys);
     1925        }
     1926    }
     1927
     1928    /*
     1929     * Update VM-exit MSR store area.
     1930     */
     1931    PVMXAUTOMSR pMsr = pMsrArea;
    19071932    Assert(pMsr);
    19081933    for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
     
    19361961    }
    19371962
    1938     RTGCPHYS const GCPhysVmExitMsrStoreArea = pVmcs->u64AddrExitMsrStore.u;
    1939     int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmExitMsrStoreArea,
    1940                                       pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea), cMsrs * sizeof(VMXAUTOMSR));
     1963    /*
     1964     * Commit the VM-exit MSR store area to guest memory.
     1965     */
     1966    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmExitMsrStoreArea, pMsrArea, cMsrs * sizeof(VMXAUTOMSR));
    19411967    if (RT_SUCCESS(rc))
    1942     { /* likely */ }
    1943     else
    1944     {
    1945         AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
    1946         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
    1947     }
     1968        return VINF_SUCCESS;
    19481969
    19491970    NOREF(uExitReason);
    19501971    NOREF(pszFailure);
    1951     return VINF_SUCCESS;
     1972
     1973    AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
     1974    IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
    19521975}
    19531976
     
    27722795    }
    27732796
    2774     /* The following VMCS fields are unsupported since we don't injecting SMIs into a guest. */
     2797    /* The following VMCS fields should always be zero since we don't support injecting SMIs into a guest. */
    27752798    Assert(pVmcs->u64RoIoRcx.u == 0);
    27762799    Assert(pVmcs->u64RoIoRsi.u == 0);
     
    28642887        iemSetPassUpStatus(pVCpu, rcSched);
    28652888#  endif
    2866     return VINF_SUCCESS;
     2889    return rcStrict;
    28672890# endif
    28682891}
     
    39824005    {
    39834006        fIsHwXcpt = true;
     4007
    39844008        /* NMIs have a dedicated VM-execution control for causing VM-exits. */
    39854009        if (uVector == X86_XCPT_NMI)
    3986             fIntercept = RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
     4010        {
     4011            if (pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
     4012                fIntercept = true;
     4013        }
    39874014        else
    39884015        {
     
    39974024            }
    39984025
    3999             /* Consult the exception bitmap for all hardware exceptions (except NMI). */
     4026            /* Consult the exception bitmap for all other hardware exceptions. */
     4027            Assert(uVector <= X86_XCPT_LAST);
    40004028            if (fXcptBitmap & RT_BIT(uVector))
    40014029                fIntercept = true;
     
    79427970 * @param   iEffSeg         The effective segment register to use with @a u64Val.
    79437971 *                          Pass UINT8_MAX if it is a register access.
    7944  * @param   enmEffAddrMode  The effective addressing mode (only used with memory
    7945  *                          operand).
    79467972 * @param   GCPtrDst        The guest linear address to store the VMCS field's
    79477973 *                          value.
     
    79507976 *                          be NULL.
    79517977 */
    7952 IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
    7953                                         RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
     7978IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDst, uint64_t u64FieldEnc,
     7979                                        PCVMXVEXITINFO pExitInfo)
    79547980{
    79557981    uint64_t u64Dst;
     
    79597985        /*
    79607986         * Write the VMCS field's value to the location specified in guest-memory.
    7961          *
    7962          * The pointer size depends on the address size (address-size prefix allowed).
    7963          * The operand size depends on IA-32e mode (operand-size prefix not allowed).
    79647987         */
    7965         static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    7966         Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
    7967         GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
    7968 
    79697988        if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    79707989            rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
     
    79958014 * @param   iEffSeg         The effective segment register to use with @a u64Val.
    79968015 *                          Pass UINT8_MAX if it is a register access.
    7997  * @param   enmEffAddrMode  The effective addressing mode (only used with memory
    7998  *                          operand).
    79998016 * @param   u64Val          The value to write (or guest linear address to the
    80008017 *                          value), @a iEffSeg will indicate if it's a memory
     
    80048021 *                          be NULL.
    80058022 */
    8006 IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
    8007                                       uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
     8023IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, uint64_t u64Val, uint64_t u64FieldEnc,
     8024                                      PCVMXVEXITINFO pExitInfo)
    80088025{
    80098026    /* Nested-guest intercept. */
     
    80568073    if (!fIsRegOperand)
    80578074    {
    8058         static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    8059         Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
    8060         RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
    8061 
    80628075        /* Read the value from the specified guest memory location. */
    8063         VBOXSTRICTRC rcStrict;
     8076        VBOXSTRICTRC  rcStrict;
     8077        RTGCPTR const GCPtrVal = u64Val;
    80648078        if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    80658079            rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
     
    85198533 * @param   iEffSeg             The segment of the invvpid descriptor.
    85208534 * @param   GCPtrInvvpidDesc    The address of invvpid descriptor.
    8521  * @param   uInvvpidType        The invalidation type.
     8535 * @param   u64InvvpidType      The invalidation type.
    85228536 * @param   pExitInfo           Pointer to the VM-exit information struct. Optional,
    85238537 *                              can be NULL.
     
    85278541 */
    85288542IEM_STATIC VBOXSTRICTRC iemVmxInvvpid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
    8529                                       uint64_t uInvvpidType, PCVMXVEXITINFO pExitInfo)
     8543                                      uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo)
    85308544{
    85318545    /* Check if INVVPID instruction is supported, otherwise raise #UD. */
     
    85518565     * Validate INVVPID invalidation type.
    85528566     *
    8553      * Each of the types have a supported bit in IA32_VMX_EPT_VPID_CAP MSR.
    8554      * In theory, it's possible for a CPU to not support flushing individual addresses
    8555      * but all the other types or any other combination.
     8567     * The instruction specifies exactly ONE of the supported invalidation types.
     8568     *
     8569     * Each of the types has a bit in IA32_VMX_EPT_VPID_CAP MSR specifying if it is
     8570     * supported. In theory, it's possible for a CPU to not support flushing individual
     8571     * addresses but all the other types or any other combination. We do not take any
     8572     * shortcuts here by assuming the types we currently expose to the guest.
    85568573     */
    85578574    uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
     
    85608577    uint8_t const fTypeAllCtx                 = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
    85618578    uint8_t const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
    8562     if (   (fTypeIndivAddr              && uInvvpidType == VMXTLBFLUSHVPID_INDIV_ADDR)
    8563         || (fTypeSingleCtx              && uInvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
    8564         || (fTypeAllCtx                 && uInvvpidType == VMXTLBFLUSHVPID_ALL_CONTEXTS)
    8565         || (fTypeSingleCtxRetainGlobals && uInvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS))
     8579    if (   (fTypeIndivAddr              && u64InvvpidType == VMXTLBFLUSHVPID_INDIV_ADDR)
     8580        || (fTypeSingleCtx              && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
     8581        || (fTypeAllCtx                 && u64InvvpidType == VMXTLBFLUSHVPID_ALL_CONTEXTS)
     8582        || (fTypeSingleCtxRetainGlobals && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS))
    85668583    { /* likely */ }
    85678584    else
    85688585    {
    8569         Log(("invvpid: invalid/unrecognized invvpid type %#x -> VMFail\n", uInvvpidType));
     8586        Log(("invvpid: invalid/unsupported invvpid type %#x -> VMFail\n", u64InvvpidType));
    85708587        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_TypeInvalid;
    85718588        iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
     
    85978614        uint8_t       const uVpid        = uDesc.s.Lo & UINT64_C(0xfff);
    85988615        uint64_t      const uCr3         = pVCpu->cpum.GstCtx.cr3;
    8599         switch (uInvvpidType)
     8616        switch (u64InvvpidType)
    86008617        {
    86018618            case VMXTLBFLUSHVPID_INDIV_ADDR:
     
    86198636                else
    86208637                {
    8621                     Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, uInvvpidType));
     8638                    Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
    86228639                    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidVpid;
    86238640                    iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
     
    86378654                else
    86388655                {
    8639                     Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, uInvvpidType));
     8656                    Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
    86408657                    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type1InvalidVpid;
    86418658                    iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
     
    86648681                else
    86658682                {
    8666                     Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, uInvvpidType));
     8683                    Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
    86678684                    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type3InvalidVpid;
    86688685                    iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
     
    90149031IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
    90159032{
    9016     return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
    9017                          NULL /* pExitInfo */);
     9033    return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, u64Val, u64FieldEnc, NULL /* pExitInfo */);
    90189034}
    90199035
     
    90229038 * Implements 'VMWRITE' memory.
    90239039 */
    9024 IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
    9025 {
    9026     return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc,  NULL /* pExitInfo */);
     9040IEM_CIMPL_DEF_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
     9041{
     9042    return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, GCPtrVal, u64FieldEnc,  NULL /* pExitInfo */);
    90279043}
    90289044
     
    90499065 * Implements 'VMREAD' memory, 64-bit register.
    90509066 */
    9051 IEM_CIMPL_DEF_4(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
    9052 {
    9053     return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
     9067IEM_CIMPL_DEF_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
     9068{
     9069    return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
    90549070}
    90559071
     
    90589074 * Implements 'VMREAD' memory, 32-bit register.
    90599075 */
    9060 IEM_CIMPL_DEF_4(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u32FieldEnc)
    9061 {
    9062     return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u32FieldEnc, NULL /* pExitInfo */);
     9076IEM_CIMPL_DEF_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32FieldEnc)
     9077{
     9078    return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u32FieldEnc, NULL /* pExitInfo */);
    90639079}
    90649080
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r76652 r78592  
    43474347        if (enmEffOpSize == IEMMODE_64BIT)
    43484348        {
    4349             IEM_MC_BEGIN(4, 0);
     4349            IEM_MC_BEGIN(3, 0);
    43504350            IEM_MC_ARG(uint8_t,       iEffSeg,                                          0);
    4351             IEM_MC_ARG_CONST(IEMMODE, enmEffAddrMode,/*=*/pVCpu->iem.s.enmEffAddrMode,  1);
    4352             IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                         2);
    4353             IEM_MC_ARG(uint64_t,      u64Enc,                                           3);
     4351            IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                         1);
     4352            IEM_MC_ARG(uint64_t,      u64Enc,                                           2);
    43544353            IEM_MC_CALC_RM_EFF_ADDR(GCPtrVal, bRm, 0);
    43554354            IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
    43564355            IEM_MC_FETCH_GREG_U64(u64Enc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
    43574356            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
    4358             IEM_MC_CALL_CIMPL_4(iemCImpl_vmread_mem_reg64, iEffSeg, enmEffAddrMode, GCPtrVal, u64Enc);
     4357            IEM_MC_CALL_CIMPL_3(iemCImpl_vmread_mem_reg64, iEffSeg, GCPtrVal, u64Enc);
    43594358            IEM_MC_END();
    43604359        }
    43614360        else
    43624361        {
    4363             IEM_MC_BEGIN(4, 0);
     4362            IEM_MC_BEGIN(3, 0);
    43644363            IEM_MC_ARG(uint8_t,       iEffSeg,                                          0);
    4365             IEM_MC_ARG_CONST(IEMMODE, enmEffAddrMode,/*=*/pVCpu->iem.s.enmEffAddrMode,  1);
    4366             IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                         2);
    4367             IEM_MC_ARG(uint32_t,      u32Enc,                                           3);
     4364            IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                         1);
     4365            IEM_MC_ARG(uint32_t,      u32Enc,                                           2);
    43684366            IEM_MC_CALC_RM_EFF_ADDR(GCPtrVal, bRm, 0);
    43694367            IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
    43704368            IEM_MC_FETCH_GREG_U32(u32Enc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
    43714369            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
    4372             IEM_MC_CALL_CIMPL_4(iemCImpl_vmread_mem_reg32, iEffSeg, enmEffAddrMode, GCPtrVal, u32Enc);
     4370            IEM_MC_CALL_CIMPL_3(iemCImpl_vmread_mem_reg32, iEffSeg, GCPtrVal, u32Enc);
    43734371            IEM_MC_END();
    43744372        }
     
    44294427        if (enmEffOpSize == IEMMODE_64BIT)
    44304428        {
    4431             IEM_MC_BEGIN(4, 0);
     4429            IEM_MC_BEGIN(3, 0);
    44324430            IEM_MC_ARG(uint8_t,       iEffSeg,                                          0);
    4433             IEM_MC_ARG_CONST(IEMMODE, enmEffAddrMode,/*=*/pVCpu->iem.s.enmEffAddrMode,  1);
    4434             IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                         2);
    4435             IEM_MC_ARG(uint64_t,      u64Enc,                                           3);
     4431            IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                         1);
     4432            IEM_MC_ARG(uint64_t,      u64Enc,                                           2);
    44364433            IEM_MC_CALC_RM_EFF_ADDR(GCPtrVal, bRm, 0);
    44374434            IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
    44384435            IEM_MC_FETCH_GREG_U64(u64Enc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
    44394436            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
    4440             IEM_MC_CALL_CIMPL_4(iemCImpl_vmwrite_mem, iEffSeg, enmEffAddrMode, GCPtrVal, u64Enc);
     4437            IEM_MC_CALL_CIMPL_3(iemCImpl_vmwrite_mem, iEffSeg, GCPtrVal, u64Enc);
    44414438            IEM_MC_END();
    44424439        }
    44434440        else
    44444441        {
    4445             IEM_MC_BEGIN(4, 0);
     4442            IEM_MC_BEGIN(3, 0);
    44464443            IEM_MC_ARG(uint8_t,       iEffSeg,                                          0);
    4447             IEM_MC_ARG_CONST(IEMMODE, enmEffAddrMode,/*=*/pVCpu->iem.s.enmEffAddrMode,  1);
    4448             IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                         2);
    4449             IEM_MC_ARG(uint32_t,      u32Enc,                                           3);
     4444            IEM_MC_ARG(RTGCPTR,       GCPtrVal,                                         1);
     4445            IEM_MC_ARG(uint32_t,      u32Enc,                                           2);
    44504446            IEM_MC_CALC_RM_EFF_ADDR(GCPtrVal, bRm, 0);
    44514447            IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
    44524448            IEM_MC_FETCH_GREG_U32(u32Enc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
    44534449            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
    4454             IEM_MC_CALL_CIMPL_4(iemCImpl_vmwrite_mem, iEffSeg, enmEffAddrMode, GCPtrVal, u32Enc);
     4450            IEM_MC_CALL_CIMPL_3(iemCImpl_vmwrite_mem, iEffSeg, GCPtrVal, u32Enc);
    44554451            IEM_MC_END();
    44564452        }
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette