Changeset 47288 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp: Jul 20, 2013 10:52:31 PM
Location:  trunk/src/VBox/VMM/VMMAll
Files:     2 edited
Index: trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r47283 → r47288)
===================================================================

@@ -4779,17 +4779,23 @@
  * @param   pHid            Pointer to the hidden register.
  * @param   iSegReg         The register number.
+ * @param   pu64BaseAddr    Where to return the base address to use for the
+ *                          segment. (In 64-bit code it may differ from the
+ *                          base in the hidden segment.)
  */
-static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
+static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
 {
-    if (!pHid->Attr.n.u1Present)
-        return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
-
-    if (   (   (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
-            || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
-        && pIemCpu->enmCpuMode != IEMMODE_64BIT )
-        return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
-
-    /** @todo DPL/RPL/CPL? */
-
+    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
+        *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
+    else
+    {
+        if (!pHid->Attr.n.u1Present)
+            return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
+
+        if (   (   (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
+                || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
+            && pIemCpu->enmCpuMode != IEMMODE_64BIT )
+            return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
+        *pu64BaseAddr = pHid->u64Base;
+    }
     return VINF_SUCCESS;
 }

@@ -4805,16 +4811,21 @@
  * @param   pHid            Pointer to the hidden register.
  * @param   iSegReg         The register number.
+ * @param   pu64BaseAddr    Where to return the base address to use for the
+ *                          segment. (In 64-bit code it may differ from the
+ *                          base in the hidden segment.)
  */
-static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
+static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
 {
-    if (!pHid->Attr.n.u1Present)
-        return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
-
-    if (   (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
-        && pIemCpu->enmCpuMode != IEMMODE_64BIT )
-        return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
-
-    /** @todo DPL/RPL/CPL? */
-
+    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
+        *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
+    else
+    {
+        if (!pHid->Attr.n.u1Present)
+            return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
+
+        if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
+            return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
+        *pu64BaseAddr = pHid->u64Base;
+    }
    return VINF_SUCCESS;
 }
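The behavioural point in these two hunks is the new 64-bit branch: in long mode the CPU treats the CS, DS, ES and SS bases as zero and only applies the FS and GS bases, which is exactly what the iSegReg < X86_SREG_FS ? 0 : pHid->u64Base expression encodes (the X86_SREG_* numbering follows the instruction-encoding order ES, CS, SS, DS, FS, GS). Returning the base through the out parameter lets the string-instruction workers in the second file compute virtual addresses the same way in every mode. A minimal standalone sketch of the addressing rule, with hypothetical names rather than IEM's own:

    #include <stdint.h>

    /* Segment register indices in X86_SREG_* order (ES=0, CS, SS, DS, FS, GS);
     * "iSegReg < SREG_FS" therefore means "any register whose base is
     * architecturally forced to zero in long mode". */
    enum { SREG_ES, SREG_CS, SREG_SS, SREG_DS, SREG_FS, SREG_GS };

    /* Hypothetical helper: compute the virtual address of a memory operand.
     * In 64-bit mode only FS/GS bases apply; in 16/32-bit modes the hidden
     * base is always added and truncated to 32 bits, as in the old
     * "#if ADDR_SIZE != 64" branches this changeset removes. */
    static uint64_t CalcVirtAddr(int f64BitMode, int iSegReg, uint64_t uSegBase, uint64_t uOffset)
    {
        if (f64BitMode)
            return uOffset + (iSegReg < SREG_FS ? 0 : uSegBase);
        return (uint32_t)uSegBase + (uint32_t)uOffset;
    }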
Index: trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h (r45305 → r47288)
===================================================================

@@ -48,8 +48,17 @@
 # define ADDR_rCX         rcx
 # define ADDR2_TYPE       uint64_t
+# define IS_64_BIT_CODE(a_pIemCpu)  (true)
 #else
 # error "Bad ADDR_SIZE."
 #endif
 #define ADDR_TYPE         RT_CONCAT3(uint,ADDR_SIZE,_t)
+
+#if ADDR_SIZE == 64 || OP_SIZE == 64
+# define IS_64_BIT_CODE(a_pIemCpu)  (true)
+#elif ADDR_SIZE == 32
+# define IS_64_BIT_CODE(a_pIemCpu)  ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)
+#else
+# define IS_64_BIT_CODE(a_pIemCpu)  (false)
+#endif
 

@@ -72,9 +81,11 @@
 
     PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
-    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
-    if (rcStrict != VINF_SUCCESS)
-        return rcStrict;
-
-    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
+    uint64_t uSrc1Base;
+    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    uint64_t uSrc2Base;
+    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

@@ -93,11 +104,6 @@
          * Do segmentation and virtual page stuff.
          */
-#if ADDR_SIZE != 64
-        ADDR2_TYPE  uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
-        ADDR2_TYPE  uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base  + uSrc2AddrReg;
-#else
-        uint64_t    uVirtSrc1Addr = uSrc1AddrReg;
-        uint64_t    uVirtSrc2Addr = uSrc2AddrReg;
-#endif
+        ADDR2_TYPE  uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
+        ADDR2_TYPE  uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
         uint32_t    cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
         if (cLeftSrc1Page > uCounterReg)

@@ -107,11 +113,11 @@
         if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
-            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
-#if ADDR_SIZE != 64
-            && uSrc1AddrReg < pSrc1Hid->u32Limit
-            && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
-            && uSrc2AddrReg < pCtx->es.u32Limit
-            && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
-#endif
+            && cbIncr > 0    /** @todo Optimize reverse direction string ops. */
+            && (   IS_64_BIT_CODE(pIemCpu)
+                || (   uSrc1AddrReg < pSrc1Hid->u32Limit
+                    && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
+                    && uSrc2AddrReg < pCtx->es.u32Limit
+                    && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+               )
            )
         {

@@ -231,9 +237,11 @@
 
     PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
-    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
-    if (rcStrict != VINF_SUCCESS)
-        return rcStrict;
-
-    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
+    uint64_t uSrc1Base;
+    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    uint64_t uSrc2Base;
+    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
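This header is a template that gets included once per address/operand-size combination, so IS_64_BIT_CODE can fold to a compile-time constant wherever the instantiation already proves the mode (64-bit addressing only exists in long mode; 16-bit addressing never does) and only costs a runtime test in the 32-bit case. A reduced sketch of the idea, using a hypothetical stand-in structure rather than IEMCPU, and omitting the OP_SIZE clause:

    #include <stdbool.h>
    #include <stdint.h>

    struct Cpu { bool f64BitMode; };        /* hypothetical stand-in for IEMCPU */

    #define ADDR_SIZE 32                    /* the instantiation being compiled */

    #if ADDR_SIZE == 64
    # define IS_64_BIT_CODE(a_pCpu)  (true)                 /* provably long mode */
    #elif ADDR_SIZE == 32
    # define IS_64_BIT_CODE(a_pCpu)  ((a_pCpu)->f64BitMode) /* needs a runtime test */
    #else
    # define IS_64_BIT_CODE(a_pCpu)  (false)                /* 16-bit: never long mode */
    #endif

    /* The guard pattern from the hunks above and below, reduced to its essence:
     * segment-limit checks are skipped when the code is, or is known at compile
     * time to be, 64-bit, where limits do not apply. */
    static bool CanUseFastPath(const struct Cpu *pCpu, uint32_t uAddr, uint32_t cb, uint32_t uLimit)
    {
        return IS_64_BIT_CODE(pCpu)
            || (uAddr < uLimit && uAddr + cb <= uLimit);
    }

The remaining hunks apply this same transformation to each string-instruction worker (CMPS, SCAS, MOVS, STOS, LODS, INS, OUTS): fetch the segment base via the extended access-check functions, then replace the per-address-size #if blocks with the runtime-or-constant guard.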
@@ -252,11 +260,6 @@
          * Do segmentation and virtual page stuff.
          */
-#if ADDR_SIZE != 64
-        ADDR2_TYPE  uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
-        ADDR2_TYPE  uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base  + uSrc2AddrReg;
-#else
-        uint64_t    uVirtSrc1Addr = uSrc1AddrReg;
-        uint64_t    uVirtSrc2Addr = uSrc2AddrReg;
-#endif
+        ADDR2_TYPE  uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
+        ADDR2_TYPE  uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
         uint32_t    cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
         if (cLeftSrc1Page > uCounterReg)

@@ -266,11 +269,11 @@
         if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
-            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
-#if ADDR_SIZE != 64
-            && uSrc1AddrReg < pSrc1Hid->u32Limit
-            && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
-            && uSrc2AddrReg < pCtx->es.u32Limit
-            && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
-#endif
+            && cbIncr > 0    /** @todo Optimize reverse direction string ops. */
+            && (   IS_64_BIT_CODE(pIemCpu)
+                || (   uSrc1AddrReg < pSrc1Hid->u32Limit
+                    && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
+                    && uSrc2AddrReg < pCtx->es.u32Limit
+                    && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+               )
            )
         {

@@ -389,5 +392,6 @@
     }
 
-    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
+    uint64_t uBaseAddr;
+    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

@@ -406,9 +410,5 @@
          * Do segmentation and virtual page stuff.
          */
-#if ADDR_SIZE != 64
-        ADDR2_TYPE  uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
-#else
-        uint64_t    uVirtAddr = uAddrReg;
-#endif
+        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
         uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
         if (cLeftPage > uCounterReg)

@@ -416,7 +416,7 @@
         if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
             && cbIncr > 0    /** @todo Implement reverse direction string ops. */
-#if ADDR_SIZE != 64
-            && uAddrReg < pCtx->es.u32Limit
-            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
-#endif
+            && (   IS_64_BIT_CODE(pIemCpu)
+                || (   uAddrReg < pCtx->es.u32Limit
+                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+               )
            )

@@ -514,5 +514,6 @@
     }
 
-    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
+    uint64_t uBaseAddr;
+    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

@@ -531,9 +532,5 @@
          * Do segmentation and virtual page stuff.
          */
-#if ADDR_SIZE != 64
-        ADDR2_TYPE  uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
-#else
-        uint64_t    uVirtAddr = uAddrReg;
-#endif
+        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
         uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
         if (cLeftPage > uCounterReg)

@@ -541,7 +538,7 @@
         if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
             && cbIncr > 0    /** @todo Implement reverse direction string ops. */
-#if ADDR_SIZE != 64
-            && uAddrReg < pCtx->es.u32Limit
-            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
-#endif
+            && (   IS_64_BIT_CODE(pIemCpu)
+                || (   uAddrReg < pCtx->es.u32Limit
+                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+               )
            )

@@ -641,9 +638,11 @@
 
     PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
-    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
-    if (rcStrict != VINF_SUCCESS)
-        return rcStrict;
-
-    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
+    uint64_t uSrcBase;
+    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    uint64_t uDstBase;
+    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uDstBase);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

@@ -685,11 +684,6 @@
          * Do segmentation and virtual page stuff.
          */
-#if ADDR_SIZE != 64
-        ADDR2_TYPE  uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
-        ADDR2_TYPE  uVirtDstAddr = (uint32_t)pCtx->es.u64Base + uDstAddrReg;
-#else
-        uint64_t    uVirtSrcAddr = uSrcAddrReg;
-        uint64_t    uVirtDstAddr = uDstAddrReg;
-#endif
+        ADDR2_TYPE  uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
+        ADDR2_TYPE  uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
         uint32_t    cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
         if (cLeftSrcPage > uCounterReg)

@@ -700,10 +694,10 @@
         if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
             && cbIncr > 0    /** @todo Implement reverse direction string ops. */
-#if ADDR_SIZE != 64
-            && uSrcAddrReg < pSrcHid->u32Limit
-            && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
-            && uDstAddrReg < pCtx->es.u32Limit
-            && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
-#endif
+            && (   IS_64_BIT_CODE(pIemCpu)
+                || (   uSrcAddrReg < pSrcHid->u32Limit
+                    && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
+                    && uDstAddrReg < pCtx->es.u32Limit
+                    && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+               )
            )
         {

@@ -804,5 +798,6 @@
     }
 
-    VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
+    uint64_t uBaseAddr;
+    VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

@@ -830,9 +825,5 @@
          * Do segmentation and virtual page stuff.
          */
-#if ADDR_SIZE != 64
-        ADDR2_TYPE  uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
-#else
-        uint64_t    uVirtAddr = uAddrReg;
-#endif
+        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
         uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
         if (cLeftPage > uCounterReg)

@@ -840,7 +831,7 @@
         if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
             && cbIncr > 0    /** @todo Implement reverse direction string ops. */
-#if ADDR_SIZE != 64
-            && uAddrReg < pCtx->es.u32Limit
-            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
-#endif
+            && (   IS_64_BIT_CODE(pIemCpu)
+                || (   uAddrReg < pCtx->es.u32Limit
+                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+               )
            )

@@ -928,5 +919,6 @@
 
     PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
-    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
+    uint64_t uBaseAddr;
+    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

@@ -943,9 +935,5 @@
          * Do segmentation and virtual page stuff.
          */
-#if ADDR_SIZE != 64
-        ADDR2_TYPE  uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
-#else
-        uint64_t    uVirtAddr = uAddrReg;
-#endif
+        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
         uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
         if (cLeftPage > uCounterReg)

@@ -953,7 +941,7 @@
         if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
             && cbIncr > 0    /** @todo Implement reverse direction string ops. */
-#if ADDR_SIZE != 64
-            && uAddrReg < pSrcHid->u32Limit
-            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
-#endif
+            && (   IS_64_BIT_CODE(pIemCpu)
+                || (   uAddrReg < pSrcHid->u32Limit
+                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
+               )
            )

@@ -1110,5 +1098,6 @@
     }
 
-    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
+    uint64_t uBaseAddr;
+    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

@@ -1134,9 +1123,5 @@
          * Do segmentation and virtual page stuff.
          */
-#if ADDR_SIZE != 64
-        ADDR2_TYPE  uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
-#else
-        uint64_t    uVirtAddr = uAddrReg;
-#endif
+        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
         uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
         if (cLeftPage > uCounterReg)

@@ -1144,7 +1129,7 @@
         if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
             && cbIncr > 0    /** @todo Implement reverse direction string ops. */
-#if ADDR_SIZE != 64
-            && uAddrReg < pCtx->es.u32Limit
-            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
-#endif
+            && (   IS_64_BIT_CODE(pIemCpu)
+                || (   uAddrReg < pCtx->es.u32Limit
+                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+               )
            )

@@ -1324,5 +1309,6 @@
 
     PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
-    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
+    uint64_t uBaseAddr;
+    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

@@ -1339,9 +1325,5 @@
          * Do segmentation and virtual page stuff.
          */
-#if ADDR_SIZE != 64
-        ADDR2_TYPE  uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
-#else
-        uint64_t    uVirtAddr = uAddrReg;
-#endif
+        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
         uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
         if (cLeftPage > uCounterReg)

@@ -1349,8 +1331,8 @@
         if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
             && cbIncr > 0    /** @todo Implement reverse direction string ops. */
-#if ADDR_SIZE != 64
-            && uAddrReg < pHid->u32Limit
-            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
-#endif
+            && (   IS_64_BIT_CODE(pIemCpu)
+                || (   uAddrReg < pHid->u32Limit
+                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
+               )
            )
         {

@@ -1468,2 +1450,3 @@
 #undef ADDR_TYPE
 #undef ADDR2_TYPE
+#undef IS_64_BIT_CODE
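All of the rewritten fast paths keep the same chunking strategy: process at most as many elements as remain in the current guest page, so a single address translation covers the whole chunk, and fall back to a slow one-element round near boundaries. A standalone sketch of the cLeftPage arithmetic under the usual 4 KiB page assumption (names are ours, not IEM's):

    #include <stdint.h>

    #define MY_PAGE_SIZE        4096u               /* x86 page size (PAGE_SIZE in VBox) */
    #define MY_PAGE_OFFSET_MASK (MY_PAGE_SIZE - 1u)

    /* Hypothetical helper mirroring the cLeftPage computation: how many
     * cbElem-sized elements fit between uVirtAddr and the end of its page. */
    static uint32_t ElemsLeftInPage(uint64_t uVirtAddr, uint32_t cbElem)
    {
        return (uint32_t)((MY_PAGE_SIZE - (uVirtAddr & MY_PAGE_OFFSET_MASK)) / cbElem);
    }

For example, with 4-byte elements starting 10 bytes before a page boundary the function returns 2; the element that straddles the boundary is handled by the per-element fallback, and the "can be null if unaligned, do one fallback round" comment covers the fully unaligned case where it returns 0.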