Changeset 47444 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jul 29, 2013 12:37:31 AM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r47429 → r47444)

- iemSetPassUpStatus(): comment typo fixed:

      -    /* If both are EM scheduling code , use EM priority rules. */
      +    /* If both are EM scheduling codes, use EM priority rules. */

- New helper iemInitExec(), which sets up the execution state without touching the decoder state:

      /**
       * Initializes the execution state.
       *
       * @param   pIemCpu             The per CPU IEM state.
       * @param   fBypassHandlers     Whether to bypass access handlers.
       */
      DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)

  In strict builds it asserts that the hidden parts of cs, ss, es, ds, fs, gs, ldtr and tr are valid; with VBOX_WITH_RAW_MODE_NOT_R0 it lazily loads the hidden CS/SS parts. It then sets uCpl from CPUMGetGuestCPL() and enmCpuMode (IEMMODE_64BIT when in 64-bit code, otherwise IEMMODE_32BIT or IEMMODE_16BIT from cs.Attr.n.u1DefBig), poisons the decoder fields (enmDefAddrMode, enmEffAddrMode, enmDefOpSize, enmEffOpSize, fPrefixes, uRexReg, uRexB, uRexIndex, iEffSeg, offOpcode, cbOpcode) under VBOX_STRICT, clears cActiveMappings and iNextMapping, sets rcPassUp to VINF_SUCCESS, stores fBypassHandlers, and in raw-mode context detects PATM patch code (fInPatchCode) and calls CPUMRawLeave() when outside it.

- The status-code handling at the end of iemExecOneInner() was factored out into a new helper:

      /**
       * Makes status code adjustments (pass up from I/O and access handler)
       * as well as maintaining statistics.
       *
       * @returns Strict VBox status code to pass up.
       * @param   pIemCpu     The IEM per CPU data.
       * @param   rcStrict    The status from executing an instruction.
       */
      DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)

  Its strict-build assertion on informational status codes now also accepts VINF_IOM_R3_IOPORT_READ, VINF_IOM_R3_IOPORT_WRITE, VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE and VINF_IOM_R3_MMIO_WRITE besides the VINF_EM_FIRST..VINF_EM_LAST range. iemExecOneInner() now ends with:

      /*
       * Return value fiddling, statistics and sanity assertions.
       */
      rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);

- New interfaces IEMExecStringIoWrite() and IEMExecStringIoRead() for HM and EM to execute string I/O OUT (write) and IN (read) instructions. Both APIs ASSUME that the caller has already verified that the guest code is allowed to access the I/O port (the I/O port is in the DX register in the guest state):

      VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                                      bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
      VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                                     bool fRepPrefix, uint8_t cbInstr)

  After validating iEffSeg (write only, must be below X86_SREG_COUNT) and cbInstr (cbInstr - 1U <= 14U), both call iemInitExec(pIemCpu, false /*fBypassHandlers*/), and a "switch orgy" then dispatches on fRepPrefix, enmAddrMode (16/32/64-bit) and cbValue (1, 2 or 4) to the matching worker, for example:

      case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;

  Unsupported combinations fail the assertions with VERR_IEM_INVALID_OPERAND_SIZE or VERR_IEM_INVALID_ADDRESS_MODE, and the result is returned through iemExecStatusCodeFiddling(pIemCpu, rcStrict).
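As a rough illustration of how a hardware-assisted execution (HM) exit handler might drive the new string I/O interfaces, the fragment below forwards an intercepted OUTS to IEM. This is a minimal sketch, not code from this changeset: the surrounding function and the way cbValue, enmAddrMode, fRep, cbInstr and the effective segment are obtained (normally decoded from the VM-exit information) are assumptions.

    /* Hypothetical caller sketch -- in practice these values come from the VM-exit info. */
    uint8_t      cbValue     = 2;               /* word-sized port access */
    IEMMODE      enmAddrMode = IEMMODE_32BIT;   /* guest used 32-bit addressing */
    bool         fRep        = true;            /* REP prefix present */
    uint8_t      cbInstr     = 2;               /* length of the OUTS instruction */
    uint8_t      iEffSeg     = X86_SREG_DS;     /* effective segment of the source data */

    /* The I/O port permission check is the caller's responsibility; the API assumes it
       was already done and invokes the C-impl workers with fIoChecked=true. */
    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, iEffSeg);
    /* An intercepted INS would instead go through
       IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr). */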
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h (r47288 → r47444)

- The 'INS', 'REP INS', 'OUTS' and 'REP OUTS' C implementation workers gain a bool fIoChecked parameter (IEM_CIMPL_DEF_0 becomes IEM_CIMPL_DEF_1 for the INS variants, IEM_CIMPL_DEF_1 becomes IEM_CIMPL_DEF_2 for the OUTS variants, which already took iEffSeg). The I/O port permission check is skipped when the caller has already performed it:

      if (!fIoChecked)
      {
          rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
          if (rcStrict != VINF_SUCCESS)
              return rcStrict;
      }

  In the REP variants rcStrict is now declared uninitialized, since the check is conditional.

- Partial-completion handling in the REP INS/OUTS loops was fixed: in the fast (mapped guest memory) paths the status pass-up and the RIP advance now both sit inside the IOM_SUCCESS() check, e.g. for REP INS:

      if (IOM_SUCCESS(rcStrict))
      {
          rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
          if (uCounterReg == 0)
              iemRegAddToRip(pIemCpu, cbInstr);
      }
      iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
      return rcStrict;

  The slow (per-unit) paths got corresponding adjustments to the ordering of iemRegAddToRip() and iemSetPassUpStatus().

- A /** @todo */ note about making the I/O manager interface use mapped buffers now ends with an exclamation mark instead of a question mark.
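The fIoChecked flag follows a common pattern: an expensive or trapping check is done once by whoever is best placed to do it, and the flag tells the worker to trust that result. The standalone sketch below (plain C, not VirtualBox code; all names are illustrative) shows the same idea in isolation.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the permission check the worker would otherwise repeat. */
    static int check_port_permission(unsigned port)
    {
        printf("checking permission for port %#x\n", port);
        return 0; /* 0 = allowed */
    }

    /* Worker that can be told the check was already done by the caller. */
    static int write_string_to_port(unsigned port, const char *data, bool fIoChecked)
    {
        if (!fIoChecked)
        {
            int rc = check_port_permission(port);
            if (rc != 0)
                return rc;      /* refuse, akin to raising #GP */
        }
        printf("writing '%s' to port %#x\n", data, port);
        return 0;
    }

    int main(void)
    {
        /* Decoder path: the worker performs its own check. */
        write_string_to_port(0x3f8, "hi", false);

        /* HM/EM path: the caller has already checked, so the worker skips it. */
        if (check_port_permission(0x3f8) == 0)
            write_string_to_port(0x3f8, "hi", true);
        return 0;
    }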
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h (r47415 → r47444)

- The INS/INSW/INSD and OUTS/OUTSW/OUTSD opcode handlers now pass the new fIoChecked argument as false (the decoder path still has to do the I/O permission check itself) when deferring to the C implementations, for every combination of operand size (8/16/32-bit), effective address mode (16/32/64-bit) and rep prefix, for example:

      -    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16);
      +    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr16, false);

      -    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg);
      +    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg, false);
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r46420 → r47444)

- PGMGstUpdatePaePdpes() no longer pretends it can fail: the @returns tag and the trailing return VINF_SUCCESS; are gone and the return type changes from int to void:

      -    VMM_INT_DECL(int) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
      +    VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)

- Further down, the doc comment of the CR3-updating function (taking pVCpu and the new cr3 value) now states the VINF_PGM_SYNC_CR3 case directly instead of as a parenthetical:

      * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
      *          paging modes). This can safely be ignored and overridden since the
      *          FF will be set too then.
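For callers the signature change is mechanical: there is no status to check any more. The before/after fragment below is illustrative only, not taken from this changeset; the aPdpes array and the surrounding caller are assumptions, while AssertRCReturn is the usual IPRT assertion macro.

    /* Before: the call pretended it could fail, so callers checked the status. */
    int rc = PGMGstUpdatePaePdpes(pVCpu, &aPdpes[0]);
    AssertRCReturn(rc, rc);

    /* After: the function always succeeds, so callers simply invoke it. */
    PGMGstUpdatePaePdpes(pVCpu, &aPdpes[0]);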