Changeset 100811 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Aug 6, 2023 1:54:38 AM (18 months ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 3 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
(r100777 → r100811)

Adds iemMemFlatFetchDataU32Jmp(), a flat-address counterpart to the segmented dword fetch helper, near the end of the IEM_WITH_SETJMP section (the closing #endif /* IEM_WITH_SETJMP */ now follows the new function):

    /**
     * Fetches a data dword from a FLAT address, longjmp on error.
     *
     * @returns The dword
     * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     * @param   GCPtrMem            The address of the guest memory.
     */
    uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    {
    # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
        /*
         * Convert from segmented to flat address and check that it doesn't cross a page boundary.
         */
        RTGCPTR GCPtrEff = GCPtrMem;
        if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
        {
            /*
             * TLB lookup.
             */
            uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
            PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
            if (pTlbe->uTag == uTag)
            {
                /*
                 * Check TLB page table level access flags.
                 */
                uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
                if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
                                                   | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
                    == pVCpu->iem.s.DataTlb.uTlbPhysRev)
                {
                    STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});

                    /*
                     * Alignment check:
                     */
                    /** @todo check priority \#AC vs \#PF */
                    if (   !(GCPtrEff & (sizeof(uint32_t) - 1))
                        || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
                        || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
                        || IEM_GET_CPL(pVCpu) != 3)
                    {
                        /*
                         * Fetch and return the dword
                         */
                        Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
                        Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
                        uint32_t const u32Ret = *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
                        Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
                        return u32Ret;
                    }
                    Log10(("iemMemFlatFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
                    iemRaiseAlignmentCheckExceptionJmp(pVCpu);
                }
            }
        }

        /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
           outdated page pointer, or other troubles. */
        Log10(("iemMemFlatFetchDataU32Jmp: %RGv fallback\n", GCPtrMem));
        return iemMemFetchDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);

    # else
        uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), UINT8_MAX, GCPtrMem,
                                                                 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
        uint32_t const  u32Ret  = *pu32Src;
        iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
        Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
        return u32Ret;
    # endif
    }

    #endif /* IEM_WITH_SETJMP */
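To make the control flow above easier to follow, here is a small Python model of the conditions under which the helper can use the inline TLB path instead of falling back to iemMemFetchDataU32SafeJmp(). It is not part of the changeset; the helper name is made up, the 4 KiB guest page size is an assumption, and the TLB tag/flag checks are deliberately omitted.

    # Hypothetical model of the fast-path checks in iemMemFlatFetchDataU32Jmp (TLB state ignored).
    GUEST_PAGE_SIZE        = 0x1000            # assumed 4 KiB guest pages
    GUEST_PAGE_OFFSET_MASK = GUEST_PAGE_SIZE - 1

    def can_use_inline_tlb_path(gc_ptr, cpl, cr0_am, eflags_ac, access_size=4):
        """True if the access stays within one page and no #AC would be raised."""
        fits_in_page = (gc_ptr & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - access_size
        aligned      = (gc_ptr & (access_size - 1)) == 0
        # #AC only applies to unaligned accesses when CR0.AM, EFLAGS.AC and CPL==3 all hold.
        no_ac_raised = aligned or not cr0_am or not eflags_ac or cpl != 3
        return fits_in_page and no_ac_raised

    assert can_use_inline_tlb_path(0x1000, cpl=3, cr0_am=True, eflags_ac=True)       # aligned, page start
    assert not can_use_inline_tlb_path(0xffe, cpl=3, cr0_am=True, eflags_ac=True)    # crosses a page boundary
    assert not can_use_inline_tlb_path(0x1002, cpl=3, cr0_am=True, eflags_ac=True)   # misaligned, #AC path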
trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncs.cpp
(r100732 → r100811)

Adds a _FSGS variant of the 64-bit effective-address calculation macro, placed right after the existing IEM_MC_CALC_RM_EFF_ADDR_THREADED_64 and expanding to the same helper call:

    /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
    #define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
        (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
(r100806 → r100811)

Introduces the new _64_FsGs threaded-function variation, registers it in the variation lists, and gives it a readable description:

        ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
        ksVariation_64        = '_64';        ##< 64-bit mode code.
        ksVariation_64_FsGs   = '_64_FsGs';   ##< 64-bit mode code, with memory accesses via FS or GS.
        ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
        kasVariations         = (
            …
            ksVariation_32_Addr16,
            ksVariation_64,
            ksVariation_64_FsGs,
            ksVariation_64_Addr32,
        );
        …
            ksVariation_32_Addr16,
            ksVariation_64,
            ksVariation_64_FsGs,
            ksVariation_64_Addr32,
        );
        …
            ksVariation_Default,
            ksVariation_64,
            ksVariation_64_FsGs,
            ksVariation_32_Flat,
            ksVariation_32,
            …
        …
            ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
            ksVariation_64:        '64-bit',
            ksVariation_64_FsGs:   '64-bit with memory accessed via FS or GS',
            ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
A new table maps each memory-access MC to the index of its effective-segment parameter and its FLAT counterpart:

    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to accesses
    ## via ES, DS and SS.  FS, GS and CS get the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                (  1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':              (  1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':              (  1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':               (  1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':          (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':               (  1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':               (  1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':          (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':               (  1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':               (  1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':          (  1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':    (  1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':               (  1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':               (  1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':               (  1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':               (  1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':               (  1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':              (  1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':    (  1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':               (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':         (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':     (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':           (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':           (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':              (  1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':    (  1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':               (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':         (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':     (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8':                (  0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':               (  0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':               (  0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':               (  0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':          (  0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':         (  0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':         (  0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':         (  0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':              (  0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':    (  0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':              (  0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':    (  0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_PUSH_U16':                    ( -1, 'IEM_MC_FLAT_PUSH_U16' ),
        'IEM_MC_PUSH_U32':                    ( -1, 'IEM_MC_FLAT_PUSH_U32' ),
        'IEM_MC_PUSH_U32_SREG':               ( -1, 'IEM_MC_FLAT_PUSH_U32_SREG' ),
        'IEM_MC_PUSH_U64':                    ( -1, 'IEM_MC_FLAT_PUSH_U64' ),
        'IEM_MC_POP_U16':                     ( -1, 'IEM_MC_FLAT_POP_U16' ),
        'IEM_MC_POP_U32':                     ( -1, 'IEM_MC_FLAT_POP_U32' ),
        'IEM_MC_POP_U64':                     ( -1, 'IEM_MC_FLAT_POP_U64' ),
        'IEM_MC_POP_EX_U16':                  ( -1, 'IEM_MC_FLAT_POP_EX_U16' ),
        'IEM_MC_POP_EX_U32':                  ( -1, 'IEM_MC_FLAT_POP_EX_U32' ),
        'IEM_MC_POP_EX_U64':                  ( -1, 'IEM_MC_FLAT_POP_EX_U64' ),
        'IEM_MC_MEM_MAP':                     (  2, 'IEM_MC_MEM_FLAT_MAP' ),
        'IEM_MC_MEM_MAP_EX':                  (  3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
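As a reading aid (not part of the changeset): the first tuple element is the zero-based index of the segment parameter that the FLAT form drops, with -1 marking the push/pop MCs, which have no explicit segment parameter to begin with; the second element is the MC name to substitute. A tiny self-contained illustration using a subset of the table:

    # Hypothetical illustration of the table's tuple layout (subset of the entries above).
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U32': (  1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_PUSH_U64':      ( -1, 'IEM_MC_FLAT_PUSH_U64' ),
    }
    idxEffSeg, sFlatMc = kdMemMcToFlatInfo['IEM_MC_FETCH_MEM_U32']
    assert idxEffSeg == 1                                  # the segment is the second parameter.
    assert kdMemMcToFlatInfo['IEM_MC_PUSH_U64'][0] == -1   # pushes/pops have no segment parameter to drop.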
In the statement morpher, the _64_FsGs variation gets the _PC64 suffix like the other 64-bit variations, and the _64 and _32_Flat variations now rewrite memory MCs into their FLAT forms, dropping the effective-segment parameter (which is likewise skipped when collecting parameter references):

    …

    def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
        …
                oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
            oNewStmt.sName += '_THREADED';
            if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
                oNewStmt.sName += '_PC64';
            elif self.sVariation == self.ksVariation_16_Pre386:
        …
                oNewStmt.sName += '_THREADED';
                oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

            # ... and in FLAT modes we must morph memory access into FLAT accesses ...
            elif (    self.sVariation in (self.ksVariation_64, self.ksVariation_32_Flat,)
                  and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                       or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                       or oNewStmt.sName.startswith('IEM_MC_MEM_MAP')
                       or (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                       or oNewStmt.sName.startswith('IEM_MC_POP') )):
                idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                if idxEffSeg != -1:
                    if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                        and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                        self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                          % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                    oNewStmt.asParams.pop(idxEffSeg);
                oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

            # Process branches of conditionals recursively.
        …
                else:
                    assert self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32);
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
        …
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in (self.ksVariation_64, self.ksVariation_32_Flat,)
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
        …
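A minimal sketch of what that morph does to a single memory MC in the _64 and _32_Flat variations: look the statement up in kdMemMcToFlatInfo, drop the effective-segment parameter when there is one, and substitute the FLAT name. The helper and the parameter names (u32Dst, iEffSeg, GCPtrEff) are hypothetical stand-ins for the generator's statement objects, not part of the changeset.

    # Hypothetical, simplified stand-in for the morph applied by analyzeMorphStmtForThreaded.
    def morph_mem_mc_to_flat(sName, asParams, kdMemMcToFlatInfo):
        idxEffSeg, sFlatName = kdMemMcToFlatInfo[sName]
        if idxEffSeg != -1:                                   # -1: pushes/pops, nothing to drop.
            asParams = asParams[:idxEffSeg] + asParams[idxEffSeg + 1:]
        return sFlatName, asParams

    # IEM_MC_FETCH_MEM_U32(u32Dst, iEffSeg, GCPtrEff) becomes IEM_MC_FETCH_MEM_FLAT_U32(u32Dst, GCPtrEff).
    assert (morph_mem_mc_to_flat('IEM_MC_FETCH_MEM_U32', ['u32Dst', 'iEffSeg', 'GCPtrEff'],
                                 {'IEM_MC_FETCH_MEM_U32': (1, 'IEM_MC_FETCH_MEM_FLAT_U32')})
            == ('IEM_MC_FETCH_MEM_FLAT_U32', ['u32Dst', 'GCPtrEff']))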
1121 1229 1122 1230 # … … 1128 1236 assert not fSimple; 1129 1237 aoCases.extend([ 1130 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64), 1131 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32), 1238 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64), 1239 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs), 1240 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru 1241 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32), 1132 1242 ]); 1133 1243 elif ThrdFnVar.ksVariation_64 in dByVari: … … 1138 1248 assert not fSimple; 1139 1249 aoCases.extend([ 1140 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat), 1141 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32), 1142 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru 1143 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16), 1250 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat), 1251 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru 1252 Case('IEMMODE_32BIT | 16', None), # fall thru 1253 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32), 1254 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru 1255 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru 1256 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru 1257 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16), 1144 1258 ]); 1145 1259 elif ThrdFnVar.ksVariation_32 in dByVari: … … 1150 1264 assert not fSimple; 1151 1265 aoCases.extend([ 1152 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16), 1153 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32), 1266 Case('IEMMODE_16BIT | 16', None), # fall thru 1267 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16), 1268 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru 1269 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32), 1154 1270 ]); 1155 1271 elif ThrdFnVar.ksVariation_16 in dByVari: