VirtualBox

Changeset 73203 in vbox


Timestamp:
Jul 18, 2018 1:00:43 PM
Author:
vboxsync
Message:

VMM, Devices: bugref:9193 Remove unused code after using EMRZSetPendingIoPort[Read|Write].

Location:
trunk
Files:
15 edited
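
The commit message refers to EMRZSetPendingIoPort[Read|Write]: instead of returning VINF_EM_RAW_EMULATE_IO_BLOCK so a whole (MM)IO-heavy block gets rescheduled to the recompiler, ring-0 now records the single port access it could not complete and lets ring-3 finish it before the guest resumes. The sketch below is a minimal, self-contained model of that record-and-replay hand-off, included only as background for the deletions that follow; every type, function and status value in it is an illustrative stand-in, not the actual VMM API.

    /* Hypothetical model of the ring-0 -> ring-3 pending I/O port hand-off. */
    #include <stdint.h>
    #include <stdio.h>

    typedef enum { PENDING_NONE = 0, PENDING_PORT_READ } PENDINGTYPE;

    typedef struct VCPUSTATE
    {
        uint64_t    rip;        /* guest instruction pointer */
        uint32_t    eax;        /* destination register of the IN instruction */
        PENDINGTYPE enmPending; /* what ring-3 still has to complete */
        uint16_t    uPort;      /* I/O port of the pending access */
        uint8_t     cbValue;    /* access size in bytes: 1, 2 or 4 */
        uint8_t     cbInstr;    /* length of the IN instruction */
    } VCPUSTATE;

    /* Ring-0 side: the access cannot be completed here, so record it on the
     * virtual CPU and ask to go back to ring-3 (the idea behind the real
     * EMRZSetPendingIoPortRead named in the commit message). */
    static int ring0DeferPortRead(VCPUSTATE *pVCpu, uint16_t uPort, uint8_t cbValue, uint8_t cbInstr)
    {
        pVCpu->enmPending = PENDING_PORT_READ;
        pVCpu->uPort      = uPort;
        pVCpu->cbValue    = cbValue;
        pVCpu->cbInstr    = cbInstr;
        return 1; /* stand-in for an informational "pending ring-3 I/O" status */
    }

    /* Ring-3 side: perform the device read, merge the result into EAX according
     * to the access size, advance RIP past the instruction, clear the state. */
    static void ring3CompletePendingIo(VCPUSTATE *pVCpu, uint32_t (*pfnPortRead)(uint16_t, uint8_t))
    {
        if (pVCpu->enmPending == PENDING_PORT_READ)
        {
            uint32_t const fMask = pVCpu->cbValue == 1 ? UINT32_C(0xff)
                                 : pVCpu->cbValue == 2 ? UINT32_C(0xffff) : UINT32_C(0xffffffff);
            uint32_t const uVal  = pfnPortRead(pVCpu->uPort, pVCpu->cbValue);
            pVCpu->eax        = (pVCpu->eax & ~fMask) | (uVal & fMask);
            pVCpu->rip       += pVCpu->cbInstr;
            pVCpu->enmPending = PENDING_NONE;
        }
    }

    /* Dummy device read so the example runs stand-alone. */
    static uint32_t dummyPortRead(uint16_t uPort, uint8_t cbValue)
    {
        (void)uPort; (void)cbValue;
        return 0x12345678;
    }

    int main(void)
    {
        VCPUSTATE VCpu = { .rip = 0x7c00, .eax = 0, .enmPending = PENDING_NONE };
        ring0DeferPortRead(&VCpu, 0x60, 1, 2);        /* a 2-byte "IN AL, 60h" could not finish in ring-0 */
        ring3CompletePendingIo(&VCpu, dummyPortRead); /* ring-3 replays it and advances RIP */
        printf("eax=%#x rip=%#llx\n", (unsigned)VCpu.eax, (unsigned long long)VCpu.rip);
        return 0;
    }

The removed HMR3RestartPendingIOInstr in HM.cpp further down did essentially the ring-3 half of this for the old HM-private pending-I/O state; with the EMRZ variants that responsibility moves to EM, which is what leaves the HM, PDM and device code below unused.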

Legend:

    Lines prefixed with "-" were removed in r73203, lines prefixed with "+" were added, and unprefixed lines are unchanged context. "…" marks a gap between hunks.
  • trunk/include/VBox/err.h

    r73199 → r73203

      /** Reason for leaving RC: The IRET resuming guest code trapped. */
      #define VINF_EM_RAW_IRET_TRAP               1139
    - /** Reason for leaving RC: Emulate (MM)IO intensive code in the recompiler. */
    - #define VINF_EM_RAW_EMULATE_IO_BLOCK        1140
      /** The interpreter was unable to deal with the instruction at hand. */
      #define VERR_EM_INTERPRETER                 (-1148)
  • trunk/include/VBox/vmm/hm.h

    r72983 → r73203

      # define HMIsRawModeCtxNeeded(a_pVM)        (!HMIsEnabled(a_pVM) || (a_pVM)->fHMNeedRawModeCtx)
      #endif
    -
    -  /**
    -  * Check if the current CPU state is valid for emulating IO blocks in the recompiler
    -  *
    -  * @returns boolean
    -  * @param   a_pVCpu     Pointer to the shared virtual CPU structure.
    -  * @internal
    -  */
    - #define HMCanEmulateIoBlock(a_pVCpu)        (!CPUMIsGuestInPagedProtectedMode(a_pVCpu))
    -
    -  /**
    -  * Check if the current CPU state is valid for emulating IO blocks in the recompiler
    -  *
    -  * @returns boolean
    -  * @param   a_pCtx      Pointer to the CPU context (within PVM).
    -  * @internal
    -  */
    - #define HMCanEmulateIoBlockEx(a_pCtx)       (!CPUMIsGuestInPagedProtectedModeEx(a_pCtx))

      /**
      …
      VMMR3_INT_DECL(bool)            HMR3IsActive(PVMCPU pVCpu);
      VMMR3_INT_DECL(void)            HMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
    - VMMR3_INT_DECL(int)             HMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx);
    - VMMR3_INT_DECL(bool)            HMR3HasPendingIOInstr(PVMCPU pVCpu);
    - VMMR3_INT_DECL(VBOXSTRICTRC)    HMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
      VMMR3_INT_DECL(int)             HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
      VMMR3_INT_DECL(int)             HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
  • trunk/include/VBox/vmm/pdmdev.h

    r73097 → r73203


          /**
    -      * Checks if our current CPU state allows for IO block emulation fallback to the recompiler
    -      *
    -      * @returns true = yes, false = no
    -      * @param   pDevIns         Device instance.
    -      */
    -     DECLR0CALLBACKMEMBER(bool, pfnCanEmulateIoBlock,(PPDMDEVINS pDevIns));
    -
    -     /**
           * Gets the VMCPU handle. Restricted API.
           *
      …


      /** Current PDMDEVHLP version number. */
    - #define PDM_DEVHLPR0_VERSION                    PDM_VERSION_MAKE(0xffe5, 7, 0)
    + #define PDM_DEVHLPR0_VERSION                    PDM_VERSION_MAKE(0xffe5, 8, 0)


      …


      #endif /* IN_RING3 */
    - #ifdef IN_RING0
    -
    - /**
    -  * @copydoc PDMDEVHLPR0::pfnCanEmulateIoBlock
    -  */
    - DECLINLINE(bool) PDMDevHlpCanEmulateIoBlock(PPDMDEVINS pDevIns)
    - {
    -     return pDevIns->CTX_SUFF(pHlp)->pfnCanEmulateIoBlock(pDevIns);
    - }
    -
    - #endif /* IN_RING0 */
    -
    -
    -

      /** Pointer to callbacks provided to the VBoxDeviceRegister() call. */
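
One detail worth noting in the pdmdev.h hunk above: dropping pfnCanEmulateIoBlock from the ring-0 device-helper callback table changes that table's layout, which is presumably why PDM_DEVHLPR0_VERSION jumps from PDM_VERSION_MAKE(0xffe5, 7, 0) to PDM_VERSION_MAKE(0xffe5, 8, 0), so that device modules built against the old layout are refused rather than calling through a mismatched table. The snippet below is a simplified, stand-alone illustration of that versioned-callback-table pattern; the macro, structure and check are stand-ins, not the real PDM definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for PDM_VERSION_MAKE: magic, major and minor packed into one
     * 32-bit value (the real macro's exact bit layout may differ). */
    #define DEMO_VERSION_MAKE(uMagic, uMajor, uMinor) \
        (((uint32_t)(uMagic) << 16) | ((uint32_t)(uMajor) << 8) | (uint32_t)(uMinor))
    #define DEMO_VERSION_MAGIC(uVer)  ((uVer) >> 16)
    #define DEMO_VERSION_MAJOR(uVer)  (((uVer) >> 8) & 0xffu)

    /* A callback table that starts with its interface version.  Removing or
     * reordering a member changes the layout, so the major version is bumped. */
    typedef struct DEMODEVHLP
    {
        uint32_t u32Version;
        int    (*pfnDoSomething)(void);
        /* int (*pfnCanEmulateIoBlock)(void);   <-- dropped: layout changed */
        uint32_t u32TheEnd;
    } DEMODEVHLP;

    #define DEMO_DEVHLP_VERSION  DEMO_VERSION_MAKE(0xffe5, 8, 0)

    /* Reject a device module compiled against a different major version
     * instead of letting it call through a table it would misread. */
    static int demoDeviceRegister(const DEMODEVHLP *pHlp, uint32_t uVerDeviceWants)
    {
        if (   DEMO_VERSION_MAGIC(pHlp->u32Version) != DEMO_VERSION_MAGIC(uVerDeviceWants)
            || DEMO_VERSION_MAJOR(pHlp->u32Version) != DEMO_VERSION_MAJOR(uVerDeviceWants))
        {
            printf("helper version mismatch: have %#x, device wants %#x\n",
                   (unsigned)pHlp->u32Version, (unsigned)uVerDeviceWants);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        static const DEMODEVHLP s_Hlp = { DEMO_DEVHLP_VERSION, NULL, 0 };
        /* A module built before the member was removed still asks for major 7. */
        return demoDeviceRegister(&s_Hlp, DEMO_VERSION_MAKE(0xffe5, 7, 0)) == -1 ? 0 : 1;
    }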
  • trunk/src/VBox/Devices/Graphics/DevVGA.cpp

    r71698 → r73203

              /* standard VGA latched access */
              VERIFY_VRAM_WRITE_OFF_RETURN(pThis, addr * 4 + 3);
    -
    - #if 0
    - /* This code does not work reliably (@bugref{8123}) and no longer helps performance either. */
    - #ifdef IN_RING0
    -         if (((++pThis->cLatchAccesses) & pThis->uMaskLatchAccess) == pThis->uMaskLatchAccess)
    -         {
    -             static uint32_t const s_aMask[5]  = {   0x3ff,   0x1ff,    0x7f,    0x3f,   0x1f};
    -             static uint64_t const s_aDelta[5] = {10000000, 5000000, 2500000, 1250000, 625000};
    -             if (PDMDevHlpCanEmulateIoBlock(pThis->CTX_SUFF(pDevIns)))
    -             {
    -                 uint64_t u64CurTime = RTTimeSystemNanoTS();
    -
    -                 /* About 1000 (or more) accesses per 10 ms will trigger a reschedule
    -                 * to the recompiler
    -                 */
    -                 if (u64CurTime - pThis->u64LastLatchedAccess < s_aDelta[pThis->iMask])
    -                 {
    -                     pThis->u64LastLatchedAccess = 0;
    -                     pThis->iMask                = RT_MIN(pThis->iMask + 1U, RT_ELEMENTS(s_aMask) - 1U);
    -                     pThis->uMaskLatchAccess     = s_aMask[pThis->iMask];
    -                     pThis->cLatchAccesses       = pThis->uMaskLatchAccess - 1;
    -                     return VINF_EM_RAW_EMULATE_IO_BLOCK;
    -                 }
    -                 if (pThis->u64LastLatchedAccess)
    -                 {
    -                     Log2(("Reset mask (was %d) delta %RX64 (limit %x)\n", pThis->iMask, u64CurTime - pThis->u64LastLatchedAccess, s_aDelta[pThis->iMask]));
    -                     if (pThis->iMask)
    -                         pThis->iMask--;
    -                     pThis->uMaskLatchAccess     = s_aMask[pThis->iMask];
    -                 }
    -                 pThis->u64LastLatchedAccess = u64CurTime;
    -             }
    -             else
    -             {
    -                 pThis->u64LastLatchedAccess = 0;
    -                 pThis->iMask                = 0;
    -                 pThis->uMaskLatchAccess     = s_aMask[pThis->iMask];
    -                 pThis->cLatchAccesses       = 0;
    -             }
    -         }
    - #endif
    - #endif

              write_mode = pThis->gr[5] & 3;
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r73097 → r73203

                            || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                            || rcStrict == VINF_EM_RAW_TO_R3
    -                       || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
                            || rcStrict == VINF_EM_TRIPLE_FAULT
                            || rcStrict == VINF_GIM_R3_HYPERCALL
  • trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp

    r72493 → r73203

                        || rcStrict == VINF_EM_SUSPEND
                        || rcStrict == VINF_EM_RESET
    -                   || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
                        //|| rcStrict == VINF_EM_HALT       /* ?? */
                        //|| rcStrict == VINF_EM_NO_MEMORY  /* ?? */
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r73199 → r73203

           || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
           || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
    -      \
    -      || ((a_fWrite) ? (a_rcStrict) == VINF_EM_RAW_EMULATE_IO_BLOCK : false) \
           \
           || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR  \
      …

       * @retval  VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
       * @retval  VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
    -  *
    -  * @retval  VINF_EM_RAW_EMULATE_IO_BLOCK in R0 only.
       *
       * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
  • trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp

    r69111 → r73203

          LogFlow(("pdmR3DevHlp_DBGFTraceBuf: caller='%p'/%d: returns %p\n", pDevIns, pDevIns->iInstance, hTraceBuf));
          return hTraceBuf;
    - }
    -
    -
    - /** @interface_method_impl{PDMDEVHLPR0,pfnCanEmulateIoBlock} */
    - static DECLCALLBACK(bool) pdmR0DevHlp_CanEmulateIoBlock(PPDMDEVINS pDevIns)
    - {
    -     PDMDEV_ASSERT_DEVINS(pDevIns);
    -     LogFlow(("pdmR0DevHlp_GetVM: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
    -     return HMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0));
      }

      …

          pdmR0DevHlp_PATMSetMMIOPatchInfo,
          pdmR0DevHlp_GetVM,
    -     pdmR0DevHlp_CanEmulateIoBlock,
          pdmR0DevHlp_GetVMCPU,
          pdmR0DevHlp_GetCurrentCpuId,
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r73097 → r73203

                  STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
                  break;
    -         case VINF_EM_RAW_EMULATE_IO_BLOCK:
    -             STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
    -             break;
              case VINF_PATCH_EMULATE_INSTR:
                  STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
  • trunk/src/VBox/VMM/VMMR3/EMHM.cpp

    r72749 → r73203

      static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
      {
    +     RT_NOREF(pVM);
          STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

      …

          {
              /*
    -          * Try to restart the io instruction that was refused in ring-0.
    -          */
    -         rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, &pVCpu->cpum.GstCtx);
    -         if (IOM_SUCCESS(rcStrict))
    -         {
    -             STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
    -             STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    -             return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
    -         }
    -         AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
    -                         RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));
    -
    -         /*
               * Hand it over to the interpreter.
               */

      …

              RT_UNTRUSTED_VALIDATED_FENCE();
              CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    -         Assert(!HMR3HasPendingIOInstr(pVCpu));
              rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
              LogFlow(("emR3HmExecuteIOInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r73107 → r73203


      /**
    -  * Force execution of the current IO code in the recompiler.
    -  *
    -  * @returns VBox status code.
    -  * @param   pVM         The cross context VM structure.
    -  * @param   pCtx        Partial VM execution context.
    -  */
    - VMMR3_INT_DECL(int) HMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
    - {
    -     PVMCPU pVCpu = VMMGetCpu(pVM);
    -
    -     Assert(HMIsEnabled(pVM));
    -     Log(("HMR3EmulateIoBlock\n"));
    -
    -     /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
    -     if (HMCanEmulateIoBlockEx(pCtx))
    -     {
    -         Log(("HMR3EmulateIoBlock -> enabled\n"));
    -         pVCpu->hm.s.EmulateIoBlock.fEnabled         = true;
    -         pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
    -         pVCpu->hm.s.EmulateIoBlock.cr0              = pCtx->cr0;
    -         return VINF_EM_RESCHEDULE_REM;
    -     }
    -     return VINF_SUCCESS;
    - }
    -
    -
    - /**
       * Checks if we can currently use hardware accelerated raw mode.
       *

      …

          }
      #endif
    -
    -     /* If we're still executing the IO code, then return false. */
    -     if (   RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled)
    -         && pCtx->rip <  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
    -         && pCtx->rip >  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
    -         && pCtx->cr0 == pVCpu->hm.s.EmulateIoBlock.cr0)
    -         return false;
    -
    -     pVCpu->hm.s.EmulateIoBlock.fEnabled = false;

          /* AMD-V supports real & protected mode with or without paging. */

      …

              && pVM->hm.s.vmx.fEnabled
              && pVM->hm.s.vmx.fUsePreemptTimer;
    - }
    -
    -
    - /**
    -  * Checks if there is an I/O instruction pending.
    -  *
    -  * @returns true if pending, false if not.
    -  * @param   pVCpu       The cross context virtual CPU structure.
    -  */
    - VMMR3_INT_DECL(bool) HMR3HasPendingIOInstr(PVMCPU pVCpu)
    - {
    -     return pVCpu->hm.s.PendingIO.enmType != HMPENDINGIO_INVALID
    -         && pVCpu->hm.s.PendingIO.GCPtrRip == pVCpu->cpum.GstCtx.rip;
    - }
    -
    -
    - /**
    -  * Restart an I/O instruction that was refused in ring-0
    -  *
    -  * @returns Strict VBox status code. Informational status codes other than the one documented
    -  *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
    -  * @retval  VINF_SUCCESS                Success.
    -  * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
    -  *                                      status code must be passed on to EM.
    -  * @retval  VERR_NOT_FOUND if no pending I/O instruction.
    -  *
    -  * @param   pVM         The cross context VM structure.
    -  * @param   pVCpu       The cross context virtual CPU structure.
    -  * @param   pCtx        Pointer to the guest CPU context.
    -  */
    - VMMR3_INT_DECL(VBOXSTRICTRC) HMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    - {
    -     /*
    -      * Check if we've got relevant data pending.
    -      */
    -     HMPENDINGIO enmType = pVCpu->hm.s.PendingIO.enmType;
    -     if (enmType == HMPENDINGIO_INVALID)
    -         return VERR_NOT_FOUND;
    -     pVCpu->hm.s.PendingIO.enmType = HMPENDINGIO_INVALID;
    -     if (pVCpu->hm.s.PendingIO.GCPtrRip != pCtx->rip)
    -         return VERR_NOT_FOUND;
    -
    -     /*
    -      * Execute pending I/O.
    -      */
    -     VBOXSTRICTRC rcStrict;
    -     switch (enmType)
    -     {
    -         case HMPENDINGIO_PORT_READ:
    -         {
    -             uint32_t uAndVal = pVCpu->hm.s.PendingIO.s.Port.uAndVal;
    -             uint32_t u32Val  = 0;
    -
    -             rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->hm.s.PendingIO.s.Port.uPort, &u32Val,
    -                                      pVCpu->hm.s.PendingIO.s.Port.cbSize);
    -             if (IOM_SUCCESS(rcStrict))
    -             {
    -                 /* Write back to the EAX register. */
    -                 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
    -                 pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext;
    -             }
    -             break;
    -         }
    -
    -         default:
    -             AssertLogRelFailedReturn(VERR_HM_UNKNOWN_IO_INSTRUCTION);
    -     }
    -
    -     if (IOM_SUCCESS(rcStrict))
    -     {
    -         /*
    -          * Check for I/O breakpoints.
    -          */
    -         uint32_t const uDr7 = pCtx->dr[7];
    -         if (   (   (uDr7 & X86_DR7_ENABLED_MASK)
    -                 && X86_DR7_ANY_RW_IO(uDr7)
    -                 && (pCtx->cr4 & X86_CR4_DE))
    -             || DBGFBpIsHwIoArmed(pVM))
    -         {
    -             VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, pVCpu->hm.s.PendingIO.s.Port.uPort,
    -                                                    pVCpu->hm.s.PendingIO.s.Port.cbSize);
    -             if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
    -                 rcStrict2 = TRPMAssertTrap(pVCpu, X86_XCPT_DB, TRPM_TRAP);
    -             /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
    -             else if (rcStrict2 != VINF_SUCCESS && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
    -                 rcStrict = rcStrict2;
    -         }
    -     }
    -     return rcStrict;
      }

  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r73097 → r73203

          STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap,            STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap",            STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
          STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate,             STAMTYPE_COUNTER, "/VMM/RZRet/Emulate",             STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
    -     STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOBlockEmulate,      STAMTYPE_COUNTER, "/VMM/RZRet/EmulateIOBlock",      STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_IO_BLOCK returns.");
          STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate,        STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate",        STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
          STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead,              STAMTYPE_COUNTER, "/VMM/RZRet/IORead",              STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_READ returns.");
  • trunk/src/VBox/VMM/include/EMHandleRCTmpl.h

    r72983 → r73203


      #ifdef EMHANDLERC_WITH_HM
    -         /*
    -          * (MM)IO intensive code block detected; fall back to the recompiler for better performance
    -          */
    -         case VINF_EM_RAW_EMULATE_IO_BLOCK:
    -             rc = HMR3EmulateIoBlock(pVM, &pVCpu->cpum.GstCtx);
    -             break;
    -
              case VINF_EM_HM_PATCH_TPR_INSTR:
                  rc = HMR3PatchTprInstr(pVM, pVCpu);
  • trunk/src/VBox/VMM/include/HMInternal.h

    r73016 → r73203

              RTGCUINTPTR                 GCPtrFaultAddress;
          } Event;
    -
    -     /** IO Block emulation state. */
    -     struct
    -     {
    -         bool                    fEnabled;
    -         uint8_t                 u8Align[7];
    -
    -         /** RIP at the start of the io code we wish to emulate in the recompiler. */
    -         RTGCPTR                 GCPtrFunctionEip;
    -
    -         uint64_t                cr0;
    -     } EmulateIoBlock;

          /* Pending IO operation. */
  • trunk/src/VBox/VMM/include/VMMInternal.h

    r72778 → r73203

          STAMCOUNTER                 StatRZRetIRETTrap;
          STAMCOUNTER                 StatRZRetEmulate;
    -     STAMCOUNTER                 StatRZRetIOBlockEmulate;
          STAMCOUNTER                 StatRZRetPatchEmulate;
          STAMCOUNTER                 StatRZRetIORead;