Changeset 66581 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Apr 17, 2017 3:00:00 AM (8 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 114608
Message: VMM: Nested Hw.virt: Implemented various SVM intercepts in IEM, addressed some todos.

Location: trunk/src/VBox/VMM/VMMAll
Files: 8 edited

  • trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp

    r66227 r66581  
    14381438    uint64_t uValidatedEfer;
    14391439    uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER;
    1440     int rc = CPUMGetValidateEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer);
     1440    int rc = CPUMQueryValidatedGuestEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer);
    14411441    if (RT_FAILURE(rc))
    14421442        return VERR_CPUM_RAISE_GP_0;
     
    61146114 *                          this function returns VINF_SUCCESS).
    61156115 */
    6116 VMMDECL(int) CPUMGetValidateEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer, uint64_t *puValidEfer)
     6116VMMDECL(int) CPUMQueryValidatedGuestEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer, uint64_t *puValidEfer)
    61176117{
    61186118    uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax >= 0x80000001
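
    Note on the rename: CPUMGetValidateEfer becomes CPUMQueryValidatedGuestEfer with an
    unchanged contract; it validates a proposed EFER value against CR0 and the current
    EFER and returns the validated value. A minimal caller sketch, condensed from the
    WRMSR path in the hunk above:

        uint64_t uValidatedEfer;
        uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER;
        int rc = CPUMQueryValidatedGuestEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0,
                                             uOldEfer, uValue, &uValidatedEfer);
        if (RT_FAILURE(rc))
            return VERR_CPUM_RAISE_GP_0;    /* Invalid EFER bits -> raise #GP(0) to the guest. */
        /* ... commit uValidatedEfer to the guest context ... */
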
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    r65792 r66581  
    193193    /** @todo Complete MONITOR implementation.  */
    194194    return VINF_SUCCESS;
     195}
     196
     197
     198/**
     199 * Checks if the monitor hardware is armed / active.
     200 *
     201 * @returns true if armed, false otherwise.
     202 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
     203 */
     204VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
     205{
     206    return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
    195207}
    196208
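
    The new EMMonitorIsArmed helper merely tests the EMMWAIT_FLAG_MONITOR_ACTIVE flag that
    a prior MONITOR instruction sets. A hypothetical caller sketch (the surrounding MWAIT
    logic is illustrative, not part of this changeset):

        if (EMMonitorIsArmed(pVCpu))
        {
            /* A prior MONITOR armed the address-range monitor: MWAIT may wait. */
        }
        else
        {
            /* No armed monitor: MWAIT should behave like a NOP. */
        }
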
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r66386 r66581  
    176176
    177177
     178/**
     179 * Converts an SVM event type to a TRPM event type.
     180 *
     181 * @returns The TRPM event type.
     182 * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
     183 *          of recognized trap types.
     184 *
     185 * @param   pEvent       Pointer to the SVM event.
     186 */
     187VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent)
     188{
     189    uint8_t const uType = pEvent->n.u3Type;
     190    switch (uType)
     191    {
     192        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
     193        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
     194        case SVM_EVENT_EXCEPTION:
     195        case SVM_EVENT_NMI:             return TRPM_TRAP;
     196        default:
     197            break;
     198    }
     199    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
     200    return TRPM_32BIT_HACK;
     201}
     202
     203
    178204#ifndef IN_RC
     205/**
     206 * Converts an IEM exception event type to an SVM event type.
     207 *
     208 * @returns The SVM event type.
     209 * @retval  UINT8_MAX if the specified type of event isn't among the set
     210 *          of recognized IEM event types.
     211 *
     212 * @param   uVector         The vector of the event.
     213 * @param   fIemXcptFlags   The IEM exception / interrupt flags.
     214 */
     215static uint8_t hmSvmEventTypeFromIemEvent(uint32_t uVector, uint32_t fIemXcptFlags)
     216{
     217    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
     218        return SVM_EVENT_EXCEPTION;
     219    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT)
     220        return uVector != X86_XCPT_NMI ? SVM_EVENT_EXTERNAL_IRQ : SVM_EVENT_NMI;
     221    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
     222        return SVM_EVENT_SOFTWARE_INT;
     223    AssertMsgFailed(("hmSvmEventTypeFromIemEvent: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector));
     224    return UINT8_MAX;
     225}
     226
     227
    179228/**
    180229 * Performs the operations necessary that are part of the vmrun instruction
     
    247296            /* Nested paging. */
    248297            if (    pVmcbCtrl->NestedPaging.n.u1NestedPaging
    249                 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging)
     298                && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
    250299            {
    251300                Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n"));
     
    255304            /* AVIC. */
    256305            if (    pVmcbCtrl->IntCtrl.n.u1AvicEnable
    257                 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fAvic)
     306                && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
    258307            {
    259308                Log(("HMSvmVmRun: AVIC not supported -> #VMEXIT\n"));
     
    263312            /* Last branch record (LBR) virtualization. */
    264313            if (    (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)
    265                 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fLbrVirt)
     314                && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
    266315            {
    267316                Log(("HMSvmVmRun: LBR virtualization not supported -> #VMEXIT\n"));
     
    350399            /* EFER, CR0 and CR4. */
    351400            uint64_t uValidEfer;
    352             rc = CPUMGetValidateEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);
     401            rc = CPUMQueryValidatedGuestEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);
    353402            if (RT_FAILURE(rc))
    354403            {
     
    592641        pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo1 = uExitInfo1;
    593642        pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo2 = uExitInfo2;
     643
     644        /*
     645         * Update the exit interrupt information field if this #VMEXIT happened as a result
     646         * of delivering an event.
     647         */
     648        {
     649            uint8_t  uExitIntVector;
     650            uint32_t uExitIntErr;
     651            uint32_t fExitIntFlags;
     652            bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
     653                                                         NULL /* uExitIntCr2 */);
     654            pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1Valid = fRaisingEvent;
     655            if (fRaisingEvent)
     656            {
     657                pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u8Vector = uExitIntVector;
     658                pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u3Type   = hmSvmEventTypeFromIemEvent(uExitIntVector, fExitIntFlags);
     659                if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
     660                {
     661                    pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1ErrorCodeValid = true;
     662                    pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u32ErrorCode     = uExitIntErr;
     663                }
     664            }
     665        }
    594666
    595667        /*
     
    920992     * Check if any IO accesses are being intercepted.
    921993     */
    922     if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
    923     {
    924         Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
    925 
    926         /*
    927          * The IOPM layout:
    928          * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
    929          * two 4K pages. However, since it's possible to do a 32-bit port IO at port
    930          * 65534 (thus accessing 4 bytes), we need 3 extra bits beyond the two 4K page.
    931          *
    932          * For IO instructions that access more than a single byte, the permission bits
    933          * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
    934          */
    935         uint8_t *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
    936 
    937         uint16_t const u16Port     = pIoExitInfo->n.u16Port;
    938         uint16_t const offIoBitmap = u16Port >> 3;
    939         uint16_t const fSizeMask   = pIoExitInfo->n.u1OP32 ? 0xf : pIoExitInfo->n.u1OP16 ? 3 : 1;
    940         uint8_t  const cShift      = u16Port - (offIoBitmap << 3);
    941         uint16_t const fIopmMask   = (1 << cShift) | (fSizeMask << cShift);
    942 
    943         pbIopm += offIoBitmap;
    944         uint16_t const fIopmBits = *(uint16_t *)pbIopm;
    945         if (fIopmBits & fIopmMask)
    946             return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip);
    947     }
     994    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
     995    Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT));
     996
     997    /*
     998     * The IOPM layout:
      999     * Each bit represents one 8-bit port, giving one bit for each of the ports
      1000     * 0..65535, i.e. 64K bits or two 4K pages.
     1001     *
     1002     * For IO instructions that access more than a single byte, the permission bits
     1003     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
     1004     *
     1005     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
     1006     * we need 3 extra bits beyond the second 4K page.
     1007     */
     1008    uint8_t const *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
     1009
     1010    uint16_t const u16Port   = pIoExitInfo->n.u16Port;
     1011    uint16_t const offIopm   = u16Port >> 3;
     1012    uint16_t const fSizeMask = pIoExitInfo->n.u1OP32 ? 0xf : pIoExitInfo->n.u1OP16 ? 3 : 1;
     1013    uint8_t  const cShift    = u16Port - (offIopm << 3);
     1014    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
     1015
     1016    pbIopm += offIopm;
     1017    uint16_t const fIopmBits = *(uint16_t *)pbIopm;
     1018    if (fIopmBits & fIopmMask)
     1019        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip);
     1020
    9481021    return VINF_HM_INTERCEPT_NOT_ACTIVE;
    9491022}
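
    Worked example of the IOPM lookup above, for a 32-bit access at port 0x3fd (a sketch;
    cShift reduces to u16Port & 7, and (1 << cShift) | (fSizeMask << cShift) reduces to
    fSizeMask << cShift since fSizeMask always has bit 0 set). The four accessed ports
    0x3fd..0x400 straddle a byte boundary, which is why the code loads 16 bits:

        uint16_t const u16Port   = 0x3fd;
        uint16_t const offIopm   = u16Port >> 3;    /* = 0x7f:  byte offset into the IOPM.   */
        uint8_t  const cShift    = u16Port & 7;     /* = 5:     bit offset inside that byte. */
        uint16_t const fIopmMask = 0xf << cShift;   /* = 0x1e0: one bit per accessed port.   */
        /* Intercept if any of bits 5..8 of the 16-bit word at pbIopm[0x7f] is set. */
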
     
    9551028 *
    9561029 * @returns Strict VBox status code.
    957  * @retval  VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
    958  *          we're not executing a nested-guest.
     1030 * @retval  VINF_SVM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not
     1031 *          specify interception of the accessed MSR @a idMsr.
    9591032 * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
    9601033 *          successfully.
     
    9731046     * Check if any MSRs are being intercepted.
    9741047     */
    975     if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
     1048    Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT));
     1049    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
     1050
     1051    uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
     1052
     1053    /*
     1054     * Get the byte and bit offset of the permission bits corresponding to the MSR.
     1055     */
     1056    uint16_t offMsrpm;
     1057    uint32_t uMsrpmBit;
     1058    int rc = hmSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
     1059    if (RT_SUCCESS(rc))
    9761060    {
    977         Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
    978         uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
     1061        Assert(uMsrpmBit < 0x3fff);
     1062        Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
     1063        if (fWrite)
     1064            ++uMsrpmBit;
    9791065
    9801066        /*
    981          * Get the byte and bit offset of the permission bits corresponding to the MSR.
     1067         * Check if the bit is set, if so, trigger a #VMEXIT.
    9821068         */
    983         uint16_t offMsrpm;
    984         uint32_t uMsrpmBit;
    985         int rc = hmSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
    986         if (RT_SUCCESS(rc))
    987         {
    988             Assert(uMsrpmBit < 0x3fff);
    989             Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
    990             if (fWrite)
    991                 ++uMsrpmBit;
    992 
    993             /*
    994              * Check if the bit is set, if so, trigger a #VMEXIT.
    995              */
    996             uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
    997             pbMsrpm += offMsrpm;
    998             if (ASMBitTest(pbMsrpm, uMsrpmBit))
    999                 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    1000         }
    1001         else
    1002         {
    1003             /*
    1004              * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it.
    1005              */
    1006             Log(("HMSvmNstGstHandleIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool\n", idMsr, fWrite));
     1069        uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
     1070        pbMsrpm += offMsrpm;
     1071        if (ASMBitTest(pbMsrpm, uMsrpmBit))
    10071072            return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    1008         }
     1073    }
     1074    else
     1075    {
     1076        /*
     1077         * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it.
     1078         */
     1079        Log(("HMSvmNstGstHandleIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool\n", idMsr, fWrite));
     1080        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    10091081    }
    10101082    return VINF_HM_INTERCEPT_NOT_ACTIVE;
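
    The MSR permission bitmap holds a pair of bits per MSR; per the AMD layout the even bit
    of the pair intercepts reads and the odd bit intercepts writes, which is why the code
    above increments uMsrpmBit for write accesses. A condensed sketch of the final test,
    assuming offMsrpm and uMsrpmBit came back from hmSvmGetMsrpmOffsetAndBit():

        uint8_t const *pbMsrpm = (uint8_t const *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
        uint32_t       uBit    = uMsrpmBit;     /* Read-intercept bit (even).                 */
        if (fWrite)
            ++uBit;                             /* Write-intercept bit is the next (odd) one. */
        bool const fIntercept = ASMBitTest(&pbMsrpm[offMsrpm], uBit);  /* Set -> #VMEXIT. */
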
     
    10691141#endif /* !IN_RC */
    10701142
    1071 
    1072 /**
    1073  * Converts an SVM event type to a TRPM event type.
    1074  *
    1075  * @returns The TRPM event type.
    1076  * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
    1077  *          of recognized trap types.
    1078  *
    1079  * @param   pEvent       Pointer to the SVM event.
    1080  */
    1081 VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent)
    1082 {
    1083     uint8_t const uType = pEvent->n.u3Type;
    1084     switch (uType)
    1085     {
    1086         case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
    1087         case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
    1088         case SVM_EVENT_EXCEPTION:
    1089         case SVM_EVENT_NMI:             return TRPM_TRAP;
    1090         default:
    1091             break;
    1092     }
    1093     AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
    1094     return TRPM_32BIT_HACK;
    1095 }
    1096 
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r66457 r66581  
    370370 * Check the common SVM instruction preconditions.
    371371 */
    372 #define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
     372# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
    373373    do { \
    374374        if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
     
    392392 * Check if an SVM is enabled.
    393393 */
    394 #define IEM_IS_SVM_ENABLED(a_pVCpu)                         (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
     394# define IEM_IS_SVM_ENABLED(a_pVCpu)                         (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
    395395
    396396/**
    397397 * Check if an SVM control/instruction intercept is set.
    398398 */
    399 #define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
     399# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
    400400
    401401/**
    402402 * Check if an SVM read CRx intercept is set.
    403403 */
    404 #define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)    (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
     404# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)    (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
    405405
    406406/**
    407407 * Check if an SVM write CRx intercept is set.
    408408 */
    409 #define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)   (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
     409# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)   (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
    410410
    411411/**
    412412 * Check if an SVM read DRx intercept is set.
    413413 */
    414 #define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
     414# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
    415415
    416416/**
    417417 * Check if an SVM write DRx intercept is set.
    418418 */
    419 #define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)   (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
     419# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)   (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
    420420
    421421/**
    422422 * Check if an SVM exception intercept is set.
    423423 */
    424 #define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt)   (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
    425 #endif /* VBOX_WITH_NESTED_HWVIRT */
     424# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)   (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
     425
     426/**
     427 * Invokes the SVM \#VMEXIT handler for the nested-guest.
     428 */
     429# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     430    do \
     431    { \
     432        VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
     433                                                        (a_uExitInfo2)); \
     434        return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
     435    } while (0)
     436
     437/**
     438 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
     439 * corresponding decode assist information.
     440 */
     441# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
     442    do \
     443    { \
     444        uint64_t uExitInfo1; \
     445        if (   IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
     446            && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
     447            uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
     448        else \
     449            uExitInfo1 = 0; \
     450        IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
     451    } while (0)
     452
     453/**
     454 * Checks and handles an SVM MSR intercept.
     455 */
     456# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
     457    HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
     458
     459#else
     460# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr)                                    do { } while (0)
     461# define IEM_IS_SVM_ENABLED(a_pVCpu)                                                      (false)
     462# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                              (false)
     463# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                 (false)
     464# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                (false)
     465# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                 (false)
     466# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                (false)
     467# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                                (false)
     468# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)  do { return VERR_SVM_IPE_1; } while (0)
     469# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
     470# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite)                        (VERR_SVM_IPE_1)
     471
     472#endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */
    426473
    427474
     
    834881IEM_STATIC VBOXSTRICTRC     iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
    835882
     883#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC)
     884/**
     885 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
     886 * accordingly.
     887 *
     888 * @returns VBox strict status code.
     889 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
     890 * @param   u16Port         The IO port being accessed.
     891 * @param   enmIoType       The type of IO access.
     892 * @param   cbReg           The IO operand size in bytes.
      893 * @param   cAddrSizeBits   The address size in bits (16, 32, or 64).
     894 * @param   iEffSeg         The effective segment number.
     895 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
     896 * @param   fStrIo          Whether this is a string IO instruction.
     897 * @param   cbInstr         The length of the IO instruction in bytes.
     898 *
     899 * @remarks This must be called only when IO instructions are intercepted by the
     900 *          nested-guest hypervisor.
     901 */
     902IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
     903                                                uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
     904{
     905    Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
     906    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
     907    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
     908
     909    static const uint32_t s_auIoOpSize[]   = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
     910    static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
     911
     912    SVMIOIOEXITINFO IoExitInfo;
     913    IoExitInfo.u         = s_auIoOpSize[cbReg & 7];
     914    IoExitInfo.u        |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
     915    IoExitInfo.n.u1STR   = fStrIo;
     916    IoExitInfo.n.u1REP   = fRep;
     917    IoExitInfo.n.u3SEG   = iEffSeg & 0x7;
     918    IoExitInfo.n.u1Type  = enmIoType;
     919    IoExitInfo.n.u16Port = u16Port;
     920
     921    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     922    return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
     923}
     924
     925#else
     926IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
     927                                                uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
     928{
     929    RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
     930    return VERR_IEM_IPE_9;
     931}
     932#endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */
    836933
    837934
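
    For reference, what the two table lookups in iemSvmHandleIOIntercept produce for a
    concrete instruction, say a 'rep outsb' with a 16-bit address size (cbReg = 1,
    cAddrSizeBits = 16); a sketch of the resulting EXITINFO1 encoding:

        SVMIOIOEXITINFO IoExitInfo;
        IoExitInfo.u         = s_auIoOpSize[1 & 7];           /* -> SVM_IOIO_8_BIT_OP.    */
        IoExitInfo.u        |= s_auIoAddrSize[(16 >> 4) & 7]; /* -> SVM_IOIO_16_BIT_ADDR. */
        IoExitInfo.n.u1STR   = 1;                             /* String instruction.      */
        IoExitInfo.n.u1REP   = 1;                             /* REP prefix present.      */
        IoExitInfo.n.u1Type  = SVMIOIOTYPE_OUT;               /* OUT access.              */
        /* Plus u3SEG and u16Port; this is the EXITINFO1 format hardware reports
           for a #VMEXIT(SVM_EXIT_IOIO). */
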
     
    31263223IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
    31273224{
     3225    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
     3226    {
     3227        Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
     3228        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     3229    }
     3230
    31283231    RT_NOREF_PV(pVCpu);
    31293232    /** @todo Probably need a separate error code and handling for this to
     
    32493352 * @{
    32503353 */
    3251 
    3252 /** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
    3253  * @{ */
    3254 /** CPU exception. */
    3255 #define IEM_XCPT_FLAGS_T_CPU_XCPT       RT_BIT_32(0)
    3256 /** External interrupt (from PIC, APIC, whatever). */
    3257 #define IEM_XCPT_FLAGS_T_EXT_INT        RT_BIT_32(1)
    3258 /** Software interrupt (int or into, not bound).
    3259  * Returns to the following instruction */
    3260 #define IEM_XCPT_FLAGS_T_SOFT_INT       RT_BIT_32(2)
    3261 /** Takes an error code. */
    3262 #define IEM_XCPT_FLAGS_ERR              RT_BIT_32(3)
    3263 /** Takes a CR2. */
    3264 #define IEM_XCPT_FLAGS_CR2              RT_BIT_32(4)
    3265 /** Generated by the breakpoint instruction. */
    3266 #define IEM_XCPT_FLAGS_BP_INSTR         RT_BIT_32(5)
    3267 /** Generated by a DRx instruction breakpoint and RF should be cleared. */
    3268 #define IEM_XCPT_FLAGS_DRx_INSTR_BP     RT_BIT_32(6)
    3269 /** @}  */
    32703354
    32713355
     
    51655249#endif
    51665250
     5251#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC)
     5252    if (IEM_IS_SVM_ENABLED(pVCpu))
     5253    {
     5254        /*
     5255         * Handle nested-guest SVM exception and software interrupt intercepts,
     5256         * see AMD spec. 15.12 "Exception Intercepts".
     5257         *
     5258         *   - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
     5259         *   - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
     5260         *     even when they use a vector in the range 0 to 31.
     5261         *   - ICEBP should not trigger #DB intercept, but its own intercept, so we catch it early in iemOp_int1.
     5262         *   - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
     5263         */
     5264        /* Check NMI intercept */
     5265        if (   u8Vector == X86_XCPT_NMI
     5266            && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
     5267        {
     5268            Log(("iemRaiseXcptOrInt: NMI intercept -> #VMEXIT\n"));
     5269            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     5270        }
     5271
     5272        /* Check CPU exception intercepts. */
     5273        if (   IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector)
     5274            && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
     5275        {
     5276            Assert(u8Vector <= 31 /* X86_XCPT_MAX */);
     5277            uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
     5278            uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
     5279            if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
     5280                && u8Vector == X86_XCPT_PF
     5281                && !(uErr & X86_TRAP_PF_ID))
     5282            {
     5283                /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
     5284#ifdef IEM_WITH_CODE_TLB
     5285#else
     5286                uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
     5287                uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
     5288                if (   cbCurrent > 0
     5289                    && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
     5290                {
     5291                    Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
     5292                    memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
     5293                }
     5294#endif
     5295            }
      5296            Log(("iemRaiseXcptOrInt: Xcpt intercept (u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64) -> #VMEXIT\n", u8Vector,
     5297                 uExitInfo1, uExitInfo2));
     5298            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
     5299        }
     5300
     5301        /* Check software interrupt (INTn) intercepts. */
     5302        if (   IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN)
     5303            && (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
     5304        {
     5305            uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
     5306            Log(("iemRaiseXcptOrInt: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
     5307            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
     5308        }
     5309    }
     5310#endif
     5311
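
    To make the exit-info encoding above concrete: a sketch of what an intercepted #PF
    (vector 14) with error code 0x2 and CR2 = 0xdeadf000 delivers to the guest hypervisor
    (the example values are illustrative):

        /* Exit code: SVM_EXIT_EXCEPTION_0 + X86_XCPT_PF, i.e. SVM_EXIT_EXCEPTION_0 + 14.  */
        uint64_t const uExitInfo1 = 0x2;                   /* IEM_XCPT_FLAGS_ERR -> uErr.   */
        uint64_t const uExitInfo2 = UINT64_C(0xdeadf000);  /* IEM_XCPT_FLAGS_CR2 -> uCr2.   */
        /* With decode assist, the fetched opcode bytes are also copied into VMCB.abInstr. */
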
    51675312    /*
    51685313     * Do recursion accounting.
     
    51795324
    51805325        /** @todo double and triple faults. */
      5326        /** @todo When implementing #DF, the SVM nested-guest #DF intercept needs some
     5327         *        care. See AMD spec. 15.12 "Exception Intercepts". */
    51815328        if (pVCpu->iem.s.cXcptRecursions >= 3)
    51825329        {
     
    51945341    }
    51955342    pVCpu->iem.s.cXcptRecursions++;
    5196     pVCpu->iem.s.uCurXcpt = u8Vector;
    5197     pVCpu->iem.s.fCurXcpt = fFlags;
     5343    pVCpu->iem.s.uCurXcpt    = u8Vector;
     5344    pVCpu->iem.s.fCurXcpt    = fFlags;
     5345    pVCpu->iem.s.uCurXcptErr = uErr;
     5346    pVCpu->iem.s.uCurXcptCr2 = uCr2;
    51985347
    51995348    /*
     
    96699818iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
    96709819{
     9820    VBOXSTRICTRC rcStrict;
     9821    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
     9822    {
     9823        Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
     9824        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     9825    }
     9826
    96719827    /*
    96729828     * The SIDT and SGDT instructions actually store the data using two
    96739829     * independent writes.  The instructions do not respond to opsize prefixes.
    96749830     */
    9675     VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
     9831    rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
    96769832    if (rcStrict == VINF_SUCCESS)
    96779833    {
     
    1175611912    } while (0)
    1175711913
    11758 #if 0
    11759 #ifdef VBOX_WITH_NESTED_HWVIRT
    11760 /** The instruction raises an \#UD when SVM is not enabled. */
    11761 #define IEMOP_HLP_NEEDS_SVM_ENABLED() \
    11762     do \
    11763     { \
    11764         if (IEM_IS_SVM_ENABLED(pVCpu)) \
    11765             return IEMOP_RAISE_INVALID_OPCODE(); \
    11766     } while (0)
    11767 #endif
    11768 #endif
    11769 
    1177011914/** The instruction is not available in 64-bit mode, throw \#UD if we're in
    1177111915 * 64-bit mode. */
     
    1191012054            return IEMOP_RAISE_INVALID_OPCODE(); \
    1191112055    } while (0)
     12056
     12057#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC)
     12058/** Check and handles SVM nested-guest control & instruction intercept. */
     12059# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     12060    do \
     12061    { \
     12062        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
     12063            IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
     12064    } while (0)
     12065
     12066/** Check and handle SVM nested-guest CR0 read intercept. */
     12067# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
     12068    do \
     12069    { \
     12070        if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
     12071            IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
     12072    } while (0)
     12073
     12074#else
     12075# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     12076    do { RT_NOREF5(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2); } while (0)
     12077
     12078# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
     12079    do { RT_NOREF4(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2); } while (0)
     12080
     12081#endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */
     12082
    1191212083
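
    A hypothetical decoder-stub use of the new helper macros (the handler below is
    illustrative only, not part of this changeset). Since IEM_RETURN_SVM_NST_GST_VMEXIT
    expands to a return statement, the check can sit directly in an opcode function:

        FNIEMOP_DEF(iemOp_hlt)
        {
            IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_HLT, SVM_EXIT_HLT,
                                         0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            /* ... normal decoding continues when the intercept is not set ... */
            return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
        }
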
    1191312084/**
     
    1504015211
    1504115212
    15042 #ifdef VBOX_WITH_NESTED_HWVIRT
    1504315213/**
    1504415214 * Checks if IEM is in the process of delivering an event (interrupt or
    1504515215 * exception).
    1504615216 *
    15047  * @returns true if it's raising an interrupt or exception, false otherwise.
    15048  * @param   pVCpu       The cross context virtual CPU structure.
    15049  */
    15050 VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
    15051 {
    15052     return pVCpu->iem.s.cXcptRecursions > 0;
    15053 }
    15054 
    15055 
     15217 * @returns true if we're in the process of raising an interrupt or exception,
     15218 *          false otherwise.
     15219 * @param   pVCpu           The cross context virtual CPU structure.
     15220 * @param   puVector        Where to store the vector associated with the
     15221 *                          currently delivered event, optional.
      15222 * @param   pfFlags         Where to store the event delivery flags (see
     15223 *                          IEM_XCPT_FLAGS_XXX), optional.
     15224 * @param   puErr           Where to store the error code associated with the
     15225 *                          event, optional.
     15226 * @param   puCr2           Where to store the CR2 associated with the event,
     15227 *                          optional.
     15228 */
     15229VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
     15230{
     15231    bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
     15232    if (fRaisingXcpt)
     15233    {
     15234        if (puVector)
     15235            *puVector = pVCpu->iem.s.uCurXcpt;
     15236        if (pfFlags)
     15237            *pfFlags = pVCpu->iem.s.fCurXcpt;
     15238        /* The caller should check the flags to determine if the error code & CR2 are valid for the event. */
     15239        if (puErr)
     15240            *puErr = pVCpu->iem.s.uCurXcptErr;
     15241        if (puCr2)
     15242            *puCr2 = pVCpu->iem.s.uCurXcptCr2;
     15243    }
     15244    return fRaisingXcpt;
     15245}
     15246
     15247
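
    IEMGetCurrentXcpt generalizes the removed IEMIsRaisingIntOrXcpt: all out parameters
    are optional (may be NULL). Its intended consumer is the #VMEXIT exit-interrupt-info
    update shown in the HMSVMAll.cpp hunk above, roughly:

        uint8_t  uVector;
        uint32_t fFlags;
        uint32_t uErr;
        if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, NULL /* puCr2 */))
        {
            /* An event was in flight when the #VMEXIT happened: record it in the
               VMCB's ExitIntInfo; check fFlags for IEM_XCPT_FLAGS_ERR before
               trusting uErr. */
        }
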
     15248#ifdef VBOX_WITH_NESTED_HWVIRT
    1505615249/**
    1505715250 * Interface for HM and EM to emulate the STGI instruction.
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r66462 r66581  
    558558    VBOXSTRICTRC rcStrict;
    559559
     560    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
     561    {
     562        Log2(("pushf: Guest intercept -> #VMEXIT\n"));
     563        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     564    }
     565
    560566    /*
    561567     * If we're in V8086 mode some care is required (which is why we're in
     
    618624    VBOXSTRICTRC    rcStrict;
    619625    uint32_t        fEflNew;
     626
     627    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
     628    {
     629        Log2(("popf: Guest intercept -> #VMEXIT\n"));
     630        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     631    }
    620632
    621633    /*
     
    38563868
    38573869    /*
     3870     * The SVM nested-guest intercept for iret takes priority over all exceptions,
     3871     * see AMD spec. "15.9 Instruction Intercepts".
     3872     */
     3873    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
     3874    {
     3875        Log(("iret: Guest intercept -> #VMEXIT\n"));
     3876        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     3877    }
     3878
     3879    /*
    38583880     * Call a mode specific worker.
    38593881     */
     
    46324654    Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
    46334655
     4656    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
     4657    {
     4658        Log(("lgdt: Guest intercept -> #VMEXIT\n"));
     4659        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     4660    }
     4661
    46344662    /*
    46354663     * Fetch the limit and base address.
     
    46984726    Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
    46994727
     4728    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
     4729    {
     4730        Log(("lidt: Guest intercept -> #VMEXIT\n"));
     4731        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     4732    }
     4733
    47004734    /*
    47014735     * Fetch the limit and base address.
     
    47834817    if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
    47844818    {
     4819        /* Nested-guest SVM intercept. */
     4820        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
     4821        {
     4822            Log(("lldt: Guest intercept -> #VMEXIT\n"));
     4823            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     4824        }
     4825
    47854826        Log(("lldt %04x: Loading NULL selector.\n",  uNewLdt));
    47864827        if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
     
    48554896        Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
    48564897        return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
     4898    }
     4899
     4900    /* Nested-guest SVM intercept. */
     4901    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
     4902    {
     4903        Log(("lldt: Guest intercept -> #VMEXIT\n"));
     4904        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    48574905    }
    48584906
     
    49084956        return iemRaiseGeneralProtectionFault0(pVCpu);
    49094957    }
     4958    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
     4959    {
     4960        Log(("ltr: Guest intercept -> #VMEXIT\n"));
     4961        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     4962    }
    49104963
    49114964    /*
     
    50105063    Assert(!pCtx->eflags.Bits.u1VM);
    50115064
     5065    if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
     5066    {
     5067        Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5068        IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
     5069    }
     5070
    50125071    /* read it */
    50135072    uint64_t crX;
     
    50515110 * @param   iCrReg          The CRx register to write (valid).
    50525111 * @param   uNewCrX         The new value.
    5053  */
    5054 IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
      5112 * @param   enmAccessCrX    The instruction that caused the CrX load.
     5113 * @param   iGReg           The general register in case of a 'mov CRx,GReg'
     5114 *                          instruction.
     5115 */
     5116IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
    50555117{
    50565118    PCPUMCTX        pCtx  = IEM_GET_CTX(pVCpu);
    50575119    VBOXSTRICTRC    rcStrict;
    50585120    int             rc;
     5121#ifndef VBOX_WITH_NESTED_HWVIRT
     5122    RT_NOREF2(iGReg, enmAccessCrX);
     5123#endif
    50595124
    50605125    /*
     
    51285193
    51295194            /*
     5195             * SVM nested-guest CR0 write intercepts.
     5196             */
     5197            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
     5198            {
     5199                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5200                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
     5201            }
     5202            if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITES))
     5203            {
     5204                /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
     5205                if (   enmAccessCrX == IEMACCESSCRX_LMSW
     5206                    || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
     5207                {
     5208                    Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
     5209                    Log(("iemCImpl_load_Cr%#x: TS/MP bit changed or lmsw instr: Guest intercept -> #VMEXIT\n", iCrReg));
     5210                    IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     5211                }
     5212            }
     5213
     5214            /*
    51305215             * Change CR0.
    51315216             */
     
    51865271         */
    51875272        case 2:
     5273        {
     5274            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
     5275            {
     5276                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5277                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
     5278            }
    51885279            pCtx->cr2 = uNewCrX;
    51895280            rcStrict  = VINF_SUCCESS;
    51905281            break;
     5282        }
    51915283
    51925284        /*
     
    52195311                     uNewCrX, uNewCrX & ~fValid));
    52205312                uNewCrX &= fValid;
     5313            }
     5314
     5315            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
     5316            {
     5317                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5318                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
    52215319            }
    52225320
     
    52845382            }
    52855383
     5384            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
     5385            {
     5386                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5387                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
     5388            }
    52865389
    52875390            /*
     
    53375440            }
    53385441
     5442            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
     5443            {
     5444                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
     5445                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
     5446            }
     5447
    53395448            if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
    53405449                APICSetTpr(pVCpu, (uint8_t)uNewCrX << 4);
     
    53795488    else
    53805489        uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
    5381     return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
     5490    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
    53825491}
    53835492
     
    54015510    uint64_t uNewCr0 = pCtx->cr0     & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    54025511    uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    5403     return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
     5512    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
    54045513}
    54055514
     
    54165525    uint64_t uNewCr0 = pCtx->cr0;
    54175526    uNewCr0 &= ~X86_CR0_TS;
    5418     return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
     5527    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
    54195528}
    54205529
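
    Summing up the SVM_EXIT_CR0_SEL_WRITE logic added in the CR0 case above, as a
    behavioral sketch matching the comment in that hunk:

        /* With SVM_CTRL_INTERCEPT_CR0_SEL_WRITES set:
           mov cr0, reg -> #VMEXIT only if bits other than TS/MP would change;
           lmsw         -> #VMEXIT unconditionally, even for TS/MP-only changes;
           clts         -> never matches (it only clears TS), hence the
                           Assert(enmAccessCrX != IEMACCESSCRX_CLTS) in the taken branch. */
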
     
    54785587    }
    54795588
     5589    /** @todo SVM nested-guest intercept for DR8-DR15? */
     5590    /*
     5591     * Check for any SVM nested-guest intercepts for the DRx read.
     5592     */
     5593    if (IEM_IS_SVM_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
     5594    {
     5595        Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
     5596        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
     5597                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
     5598    }
     5599
    54805600    if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    54815601        *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
     
    55685688    }
    55695689
     5690    /** @todo SVM nested-guest intercept for DR8-DR15? */
     5691    /*
     5692     * Check for any SVM nested-guest intercepts for the DRx write.
     5693     */
     5694    if (IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
     5695    {
     5696        Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
     5697        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
     5698                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
     5699    }
     5700
    55705701    /*
    55715702     * Do the actual setting.
     
    55975728    Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
    55985729
     5730    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
     5731    {
     5732        Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
     5733        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
     5734                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? GCPtrPage : 0, 0 /* uExitInfo2 */);
     5735    }
     5736
    55995737    int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
    56005738    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     
    56295767        Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
    56305768        return iemRaiseGeneralProtectionFault0(pVCpu);
     5769    }
     5770
     5771    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
     5772    {
     5773        Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
     5774        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    56315775    }
    56325776
     
    56475791
    56485792/**
      5793 * Implements RDTSCP.
     5794 */
     5795IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
     5796{
     5797    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     5798
     5799    /*
     5800     * Check preconditions.
     5801     */
     5802    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
     5803        return iemRaiseUndefinedOpcode(pVCpu);
     5804
     5805    if (   (pCtx->cr4 & X86_CR4_TSD)
     5806        && pVCpu->iem.s.uCpl != 0)
     5807    {
     5808        Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     5809        return iemRaiseGeneralProtectionFault0(pVCpu);
     5810    }
     5811
     5812    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
     5813    {
     5814        Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
     5815        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     5816    }
     5817
     5818    /*
     5819     * Do the job.
     5820     * Query the MSR first in case of trips to ring-3.
     5821     */
     5822    VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
     5823    if (rcStrict == VINF_SUCCESS)
     5824    {
     5825        /* Low dword of the TSC_AUX msr only. */
     5826        pCtx->rcx &= UINT32_C(0xffffffff);
     5827
     5828        uint64_t uTicks = TMCpuTickGet(pVCpu);
     5829        pCtx->rax = (uint32_t)uTicks;
     5830        pCtx->rdx = uTicks >> 32;
     5831#ifdef IEM_VERIFICATION_MODE_FULL
     5832        pVCpu->iem.s.fIgnoreRaxRdx = true;
     5833#endif
     5834        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5835    }
     5836    return rcStrict;
     5837}
     5838
     5839
     5840/**
     5841 * Implements RDPMC.
     5842 */
     5843IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
     5844{
     5845    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     5846    if (   pVCpu->iem.s.uCpl != 0
     5847        && !(pCtx->cr4 & X86_CR4_PCE))
     5848        return iemRaiseGeneralProtectionFault0(pVCpu);
     5849
     5850    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
     5851    {
     5852        Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
     5853        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     5854    }
     5855
     5856    /** @todo Implement RDPMC for the regular guest execution case (the above only
     5857     *        handles nested-guest intercepts). */
     5858    RT_NOREF(cbInstr);
     5859    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
     5860}
     5861
     5862
     5863/**
    56495864 * Implements RDMSR.
    56505865 */
     
    56655880     */
    56665881    RTUINT64U uValue;
    5667     VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
     5882    VBOXSTRICTRC rcStrict;
     5883    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
     5884    {
     5885        rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, false /* fWrite */);
     5886        if (rcStrict == VINF_SVM_VMEXIT)
     5887            return VINF_SUCCESS;
     5888        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     5889        {
     5890            Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
     5891            return rcStrict;
     5892        }
     5893    }
     5894
     5895    rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
    56685896    if (rcStrict == VINF_SUCCESS)
    56695897    {
     
    57185946
    57195947    VBOXSTRICTRC rcStrict;
     5948    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
     5949    {
     5950        rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, true /* fWrite */);
     5951        if (rcStrict == VINF_SVM_VMEXIT)
     5952            return VINF_SUCCESS;
     5953        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     5954        {
      5955            Log(("IEM: SVM intercepted wrmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
     5956            return rcStrict;
     5957        }
     5958    }
     5959
    57205960    if (!IEM_VERIFICATION_ENABLED(pVCpu))
    57215961        rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
     
    57766016
    57776017    /*
     6018     * Check SVM nested-guest IO intercept.
     6019     */
     6020    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     6021    {
     6022        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, 0 /* N/A - cAddrSizeBits */,
     6023                                           0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
     6024        if (rcStrict == VINF_SVM_VMEXIT)
     6025            return VINF_SUCCESS;
     6026        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     6027        {
     6028            Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
     6029                 VBOXSTRICTRC_VAL(rcStrict)));
     6030            return rcStrict;
     6031        }
     6032    }
     6033
     6034    /*
    57786035     * Perform the I/O.
    57796036     */
     
    58466103
    58476104    /*
     6105     * Check SVM nested-guest IO intercept.
     6106     */
     6107    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     6108    {
     6109        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, 0 /* N/A - cAddrSizeBits */,
     6110                                           0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
     6111        if (rcStrict == VINF_SVM_VMEXIT)
     6112            return VINF_SUCCESS;
     6113        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     6114        {
     6115            Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
     6116                 VBOXSTRICTRC_VAL(rcStrict)));
     6117            return rcStrict;
     6118        }
     6119    }
     6120
     6121    /*
    58486122     * Perform the I/O.
    58496123     */
     
    59146188    }
    59156189
    5916 #ifndef IN_RC
    59176190    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
    59186191    {
    59196192        Log(("vmrun: Guest intercept -> #VMEXIT\n"));
    5920         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    5921     }
    5922 #endif
     6193        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6194    }
    59236195
    59246196    VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, GCPhysVmcb);
     
    59416213{
    59426214    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    5943 #ifndef IN_RC
    59446215    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
    59456216    {
    5946         Log(("vmrun: Guest intercept -> #VMEXIT\n"));
    5947         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    5948     }
    5949 #endif
     6217        Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
     6218        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6219    }
    59506220
    59516221    bool fUpdatedRipAndRF;
     
    59696239    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    59706240    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
    5971 #ifndef IN_RC
    5972     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
    5973     {
    5974         Log(("vmload: Guest intercept -> #VMEXIT\n"));
    5975         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    5976     }
    5977 #endif
    59786241
    59796242    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
     
    59836246        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
    59846247        return iemRaiseGeneralProtectionFault0(pVCpu);
     6248    }
     6249
     6250    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
     6251    {
     6252        Log(("vmload: Guest intercept -> #VMEXIT\n"));
     6253        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    59856254    }
    59866255
     
    60206289    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    60216290    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
    6022 #ifndef IN_RC
    6023     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
    6024     {
    6025         Log(("vmsave: Guest intercept -> #VMEXIT\n"));
    6026         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    6027     }
    6028 #endif
    60296291
    60306292    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
     
    60346296        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
    60356297        return iemRaiseGeneralProtectionFault0(pVCpu);
     6298    }
     6299
     6300    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
     6301    {
     6302        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
     6303        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    60366304    }
    60376305
     
    60716339    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    60726340    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
    6073 #ifndef IN_RC
    60746341    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
    60756342    {
    60766343        Log(("clgi: Guest intercept -> #VMEXIT\n"));
    6077         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    6078     }
    6079 #endif
     6344        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6345    }
    60806346
    60816347    pCtx->hwvirt.svm.fGif = 0;
     
    60926358    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    60936359    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
    6094 #ifndef IN_RC
    60956360    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
    60966361    {
    60976362        Log2(("stgi: Guest intercept -> #VMEXIT\n"));
    6098         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    6099     }
    6100 #endif
     6363        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6364    }
    61016365
    61026366    pCtx->hwvirt.svm.fGif = 1;
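
CLGI and STGI only toggle pCtx->hwvirt.svm.fGif here; the effect is felt at interrupt-delivery time. A standalone sketch of the gate, assuming the usual SVM semantics that a clear GIF holds external interrupts pending regardless of RFLAGS.IF:

    #include <stdbool.h>

    static bool canDeliverExternalInterrupt(bool fGif, bool fIf)
    {
        /* With GIF clear (CLGI), external interrupts are held pending even
           if RFLAGS.IF is set; STGI (fGif = 1) re-opens the gate. */
        return fGif && fIf;
    }
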
     
    61126376{
    61136377    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    6114     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
    6115 #ifndef IN_RC
    6116     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
    6117     {
    6118         Log2(("invlpga: Guest intercept -> #VMEXIT\n"));
    6119         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    6120     }
    6121 #endif
    6122 
    61236378    RTGCPTR  const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    61246379    /** @todo PGM needs virtual ASID support. */
     
    61266381    uint32_t const uAsid     = pCtx->ecx;
    61276382#endif
     6383
     6384    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
     6385    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
     6386    {
     6387        Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
     6388        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6389    }
     6390
    61286391    PGMInvalidatePage(pVCpu, GCPtrPage);
    61296392    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    61306393    return VINF_SUCCESS;
     6394}
     6395
     6396
     6397/**
     6398 * Implements 'SKINIT'.
     6399 */
     6400IEM_CIMPL_DEF_0(iemCImpl_skinit)
     6401{
      6402    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);
     6403
     6404    uint32_t uIgnore;
     6405    uint32_t fFeaturesECX;
     6406    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
     6407    if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
     6408        return iemRaiseUndefinedOpcode(pVCpu);
     6409
     6410    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
     6411    {
     6412        Log2(("skinit: Guest intercept -> #VMEXIT\n"));
     6413        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6414    }
     6415
     6416    RT_NOREF(cbInstr);
     6417    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
    61316418}
    61326419#endif /* VBOX_WITH_NESTED_HWVIRT */
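
The new skinit handler gates on CPUID leaf 0x80000001 before anything else. For reference, a standalone sketch of that feature test; bit 12 of ECX advertises SKINIT per the AMD APM, and the constant name below is a stand-in for X86_CPUID_AMD_FEATURE_ECX_SKINIT:

    #include <stdbool.h>
    #include <stdint.h>

    #define CPUID_AMD_FEATURE_ECX_SKINIT  (UINT32_C(1) << 12)  /* leaf 0x80000001, ECX bit 12 */

    static bool isSkinitSupported(uint32_t uEcxLeaf80000001)
    {
        /* If the bit is clear, SKINIT must raise #UD as the handler does. */
        return (uEcxLeaf80000001 & CPUID_AMD_FEATURE_ECX_SKINIT) != 0;
    }
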
     
    62286515    if (pVCpu->iem.s.uCpl != 0)
    62296516        return iemRaiseGeneralProtectionFault0(pVCpu);
     6517
     6518    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
     6519    {
     6520        Log2(("hlt: Guest intercept -> #VMEXIT\n"));
     6521        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6522    }
     6523
    62306524    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    62316525    return VINF_EM_HALT;
     
    62766570        return rcStrict;
    62776571
     6572    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
     6573    {
     6574        Log2(("monitor: Guest intercept -> #VMEXIT\n"));
     6575        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6576    }
     6577
    62786578    /*
    62796579     * Call EM to prepare the monitor/wait.
     
    63346634
    63356635    /*
     6636     * Check SVM nested-guest mwait intercepts.
     6637     */
     6638    if (   IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
     6639        && EMMonitorIsArmed(pVCpu))
     6640    {
     6641        Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
     6642        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6643    }
     6644    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
     6645    {
     6646        Log2(("mwait: Guest intercept -> #VMEXIT\n"));
     6647        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6648    }
     6649
     6650    /*
    63366651     * Call EM to prepare the monitor/wait.
    63376652     */
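
Note the mwait hunk checks the conditional MWAIT_ARMED intercept before the unconditional MWAIT one, so an armed monitor takes the more specific exit. A standalone sketch of that priority, with plain stand-ins for the VMCB intercept bits and EMMonitorIsArmed():

    #include <stdbool.h>

    typedef enum { EXIT_NONE, EXIT_MWAIT_ARMED, EXIT_MWAIT } MWAITEXIT;

    static MWAITEXIT mwaitIntercept(bool fInterceptArmed, bool fIntercept, bool fMonitorArmed)
    {
        if (fInterceptArmed && fMonitorArmed)
            return EXIT_MWAIT_ARMED;   /* SVM_EXIT_MWAIT_ARMED: monitor hardware armed. */
        if (fIntercept)
            return EXIT_MWAIT;         /* SVM_EXIT_MWAIT: unconditional intercept.      */
        return EXIT_NONE;
    }
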
     
    63786693{
    63796694    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     6695
     6696    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
     6697    {
     6698        Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
     6699        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     6700    }
    63806701
    63816702    CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
     
    67267047    if (pCtx->cr4 & X86_CR4_OSXSAVE)
    67277048    {
     7049        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
     7050        {
     7051            Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
     7052            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     7053        }
     7054
    67287055        if (pVCpu->iem.s.uCpl == 0)
    67297056        {
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h

    r62478 r66581  
    12181218    }
    12191219
     1220    /*
     1221     * Check SVM nested-guest IO intercept.
     1222     */
     1223    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     1224    {
     1225        rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, false /* fRep */,
     1226                                           true /* fStrIo */, cbInstr);
     1227        if (rcStrict == VINF_SVM_VMEXIT)
     1228            return VINF_SUCCESS;
     1229        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1230        {
     1231            Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8,
     1232                 VBOXSTRICTRC_VAL(rcStrict)));
     1233            return rcStrict;
     1234        }
     1235    }
     1236
    12201237    OP_TYPE        *puMem;
    12211238    rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
     
    12691286        if (rcStrict != VINF_SUCCESS)
    12701287            return rcStrict;
     1288    }
     1289
     1290    /*
     1291     * Check SVM nested-guest IO intercept.
     1292     */
     1293    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     1294    {
     1295        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, true /* fRep */,
     1296                                           true /* fStrIo */, cbInstr);
     1297        if (rcStrict == VINF_SVM_VMEXIT)
     1298            return VINF_SUCCESS;
     1299        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1300        {
     1301            Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
     1302                 VBOXSTRICTRC_VAL(rcStrict)));
     1303            return rcStrict;
     1304        }
    12711305    }
    12721306
     
    14551489    }
    14561490
     1491    /*
     1492     * Check SVM nested-guest IO intercept.
     1493     */
     1494    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     1495    {
     1496        rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, false /* fRep */,
     1497                                           true /* fStrIo */, cbInstr);
     1498        if (rcStrict == VINF_SVM_VMEXIT)
     1499            return VINF_SUCCESS;
     1500        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1501        {
     1502            Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8,
     1503                 VBOXSTRICTRC_VAL(rcStrict)));
     1504            return rcStrict;
     1505        }
     1506    }
     1507
    14571508    OP_TYPE uValue;
    14581509    rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
     
    14961547        if (rcStrict != VINF_SUCCESS)
    14971548            return rcStrict;
     1549    }
     1550
     1551    /*
     1552     * Check SVM nested-guest IO intercept.
     1553     */
     1554    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     1555    {
     1556        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, true /* fRep */,
     1557                                           true /* fStrIo */, cbInstr);
     1558        if (rcStrict == VINF_SVM_VMEXIT)
     1559            return VINF_SUCCESS;
     1560        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1561        {
     1562            Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
     1563                 VBOXSTRICTRC_VAL(rcStrict)));
     1564            return rcStrict;
     1565        }
    14981566    }
    14991567
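
All four string-I/O call sites pass fStrIo = true plus the real REP flag into iemSvmHandleIOIntercept(). For orientation, a sketch of how the IOIO EXITINFO1 word those parameters feed into is laid out per the AMD APM (effective-segment bits omitted; this is not the VBox encoder):

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t svmIoioExitInfo1(uint16_t uPort, bool fIn, uint8_t cbReg,
                                     uint8_t cAddrBits, bool fRep, bool fStrIo)
    {
        uint64_t u = fIn ? UINT64_C(1) : 0;            /* bit 0: TYPE, 1 = IN      */
        if (fStrIo) u |= UINT64_C(1) << 2;             /* bit 2: STR (string I/O)  */
        if (fRep)   u |= UINT64_C(1) << 3;             /* bit 3: REP prefix        */
        u |= cbReg == 1 ? UINT64_C(1) << 4             /* bits 4-6: SZ8/SZ16/SZ32  */
           : cbReg == 2 ? UINT64_C(1) << 5 : UINT64_C(1) << 6;
        u |= cAddrBits == 16 ? UINT64_C(1) << 7        /* bits 7-9: A16/A32/A64    */
           : cAddrBits == 32 ? UINT64_C(1) << 8 : UINT64_C(1) << 9;
        u |= (uint64_t)uPort << 16;                    /* bits 16-31: port number  */
        return u;
    }
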
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h

    r66479 r66581  
    44544454
    44554455    if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)
     4456    {
    44564457        IEMOP_MNEMONIC(pause, "pause");
     4458#ifdef VBOX_WITH_NESTED_HWVIRT
     4459        /** @todo Pause filter count and threshold with SVM nested hardware virt. */
     4460        Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter);
     4461        Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold);
     4462#endif
     4463        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
     4464    }
    44574465    else
    44584466        IEMOP_MNEMONIC(nop, "nop");
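
IEMOP_HLP_SVM_CTRL_INTERCEPT is the decoder-level counterpart of the checks added to the CIMPL handlers. Judging purely from its uses in this changeset, it presumably reduces to the following; a guess, the real helper may differ:

    #define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
        do { \
            if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
                IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
        } while (0)
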
     
    1058210590    IEMOP_HLP_MIN_386(); /** @todo does not generate #UD on 286, or so they say... */
    1058310591    /** @todo testcase! */
     10592    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_ICEBP, SVM_EXIT_ICEBP, 0, 0);
    1058410593    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_DB, false /*fIsBpInstr*/);
    1058510594}
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r66474 r66581  
    3535    {
    3636        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
     37        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
    3738        switch (pVCpu->iem.s.enmEffOpSize)
    3839        {
     
    7475        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    7576        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
     77        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
    7678        IEM_MC_FETCH_LDTR_U16(u16Ldtr);
    7779        IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Ldtr);
     
    9395    {
    9496        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
     97        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
    9598        switch (pVCpu->iem.s.enmEffOpSize)
    9699        {
     
    132135        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    133136        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
     137        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
    134138        IEM_MC_FETCH_TR_U16(u16Tr);
    135139        IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Tr);
     
    482486    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
    483487}
     488
     489
     490/** Opcode 0x0f 0x01 0xde. */
     491FNIEMOP_DEF(iemOp_Grp7_Amd_skinit)
     492{
     493    IEMOP_MNEMONIC(skinit, "skinit");
     494    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_skinit);
     495}
    484496#else
    485497/** Opcode 0x0f 0x01 0xd8. */
     
    503515/** Opcode 0x0f 0x01 0xdf. */
    504516FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
    505 #endif /* VBOX_WITH_NESTED_HWVIRT */
    506517
    507518/** Opcode 0x0f 0x01 0xde. */
    508519FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit);
     520#endif /* VBOX_WITH_NESTED_HWVIRT */
    509521
    510522/** Opcode 0x0f 0x01 /4. */
     
    516528    {
    517529        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     530        IEMOP_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    518531        switch (pVCpu->iem.s.enmEffOpSize)
    519532        {
     
    562575        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    563576        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     577        IEMOP_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    564578        IEM_MC_FETCH_CR0_U16(u16Tmp);
    565579        if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
     
    636650FNIEMOP_DEF(iemOp_Grp7_rdtscp)
    637651{
    638     NOREF(pVCpu);
    639     IEMOP_BITCH_ABOUT_STUB();
    640     return VERR_IEM_INSTR_NOT_IMPLEMENTED;
     652    IEMOP_MNEMONIC(rdtscp, "rdtscp");
     653    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     654    /** @todo SVM intercept removal from here. */
     655    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP, SVM_EXIT_RDTSCP, 0, 0);
     656    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtscp);
    641657}
    642658
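
The former rdtscp stub now defers to iemCImpl_rdtscp after the intercept check. As a reminder of what that CIMPL handler must eventually produce, architecturally RDTSCP returns the TSC in EDX:EAX and IA32_TSC_AUX (MSR 0xC0000103) in ECX; a standalone sketch with stand-in types:

    #include <stdint.h>

    typedef struct { uint32_t eax, ecx, edx; } GUESTREGS;

    static void rdtscpResult(GUESTREGS *pRegs, uint64_t uTsc, uint32_t uTscAux)
    {
        pRegs->eax = (uint32_t)uTsc;          /* low 32 bits of the TSC  */
        pRegs->edx = (uint32_t)(uTsc >> 32);  /* high 32 bits of the TSC */
        pRegs->ecx = uTscAux;                 /* IA32_TSC_AUX            */
    }
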
     
    868884
    869885/** Opcode 0x0f 0x08. */
    870 FNIEMOP_STUB(iemOp_invd);
     886FNIEMOP_DEF(iemOp_invd)
     887{
     888    IEMOP_MNEMONIC(invd, "invd");
     889#ifdef VBOX_WITH_NESTED_HWVIRT
     890    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
     891    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
     892#endif
     893    /** @todo implement invd for the regular case (above only handles nested SVM
     894     *        exits). */
     895    IEMOP_BITCH_ABOUT_STUB();
     896    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
     897}
     898
    871899// IEMOP_HLP_MIN_486();
    872900
     
    880908    IEM_MC_BEGIN(0, 0);
    881909    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
     910    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
    882911    IEM_MC_ADVANCE_RIP();
    883912    IEM_MC_END();
     
    20312060
    20322061/** Opcode 0x0f 0x34. */
    2033 FNIEMOP_STUB(iemOp_rdpmc);
     2062FNIEMOP_DEF(iemOp_rdpmc)
     2063{
     2064    IEMOP_MNEMONIC(rdpmc, "rdpmc");
     2065    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     2066    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdpmc);
     2067}
     2068
     2069
    20342070/** Opcode 0x0f 0x34. */
    20352071FNIEMOP_STUB(iemOp_sysenter);
     
    57225758
    57235759/** Opcode 0x0f 0xaa. */
    5724 FNIEMOP_STUB(iemOp_rsm);
     5760FNIEMOP_DEF(iemOp_rsm)
     5761{
     5762    IEMOP_MNEMONIC(rsm, "rsm");
     5763    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
     5764    /** @todo rsm - for the regular case (above handles only the SVM nested-guest
     5765     *        intercept). */
     5766    IEMOP_BITCH_ABOUT_STUB();
     5767    return IEMOP_RAISE_INVALID_OPCODE();
     5768}
     5769
    57255770//IEMOP_HLP_MIN_386();
    57265771