VirtualBox

Changeset 100811 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Aug 6, 2023 1:54:38 AM (18 months ago)
Author:
vboxsync
Message:

VMM/IEM: Working on implementing the FLAT mode (64-bit mode and 32-bit FLAT) optimizations. Introduced a special 64-bit FS+GS(+CS) variant so we can deal with it the same way as the flat 32-bit variant; this means lumping CS-prefixed accesses (unlikely) in with FS and GS. We call the FLAT variant for DS, ES, and SS accesses and the segmented variant for memory accesses via FS, GS, and CS. bugref:10369
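To make the routing concrete: in 64-bit mode the recompiler can now pick the cheap FLAT helpers whenever the effective segment is ES, SS or DS, and fall back to the fully segmented helpers for FS, GS and CS. A minimal sketch of that decision follows (the wrapper itself is illustrative, not part of the changeset; the segment test mirrors the expression added to IEMAllThrdPython.py below, and the helper names follow the IEM fetch routines this changeset works with):

    /* Illustrative only: route a 64-bit data read to the FLAT or segmented path.
     * ES=0, CS=1, SS=2, DS=3 are all below X86_SREG_FS, so this test selects
     * ES/SS/DS while excluding CS. */
    uint32_t fetchDataU32For64BitCode(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrMem)
    {
        if (iEffSeg < X86_SREG_FS && iEffSeg != X86_SREG_CS)
            return iemMemFlatFetchDataU32Jmp(pVCpu, GCPtrMem);  /* skips base/limit work */
        return iemMemFetchDataU32Jmp(pVCpu, iEffSeg, GCPtrMem); /* full segmentation     */
    }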

Location:
trunk/src/VBox/VMM/VMMAll
Files:
3 edited

Legend:

Lines prefixed with '+' were added, lines prefixed with '-' were removed, and unprefixed lines are unmodified context.
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

r100777 → r100811

 # endif
 }
-#endif
+
+/**
+ * Fetches a data dword from a FLAT address, longjmp on error.
+ *
+ * @returns The dword
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
+{
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
+    /*
+     * The address is already flat, so we only need to check that it doesn't cross a page boundary.
+     */
+    RTGCPTR GCPtrEff = GCPtrMem;
+    if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (pTlbe->uTag == uTag)
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
+            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
+                                               | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3  | fNoUser))
+                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
+            {
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+
+                /*
+                 * Alignment check:
+                 */
+                /** @todo check priority \#AC vs \#PF */
+                if (   !(GCPtrEff & (sizeof(uint32_t) - 1))
+                    || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
+                    || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
+                    || IEM_GET_CPL(pVCpu) != 3)
+                {
+                    /*
+                     * Fetch and return the dword.
+                     */
+                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                    uint32_t const u32Ret = *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
+                    Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
+                    return u32Ret;
+                }
+                Log10(("iemMemFlatFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
+                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
+       outdated page pointer, or other troubles. */
+    Log10(("iemMemFlatFetchDataU32Jmp: %RGv fallback\n", GCPtrMem));
+    return iemMemFetchDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
+
+# else
+    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), UINT8_MAX, GCPtrMem,
+                                                             IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
+    uint32_t const  u32Ret  = *pu32Src;
+    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
+    Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
+    return u32Ret;
+# endif
+}
+
+#endif /* IEM_WITH_SETJMP */
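The fast path above only handles a dword that lies entirely within one guest page; anything else (page-straddling, TLB miss, unmapped or MMIO pages, access-flag trouble) drops to the safe fallback. With the usual 4 KiB guest pages (an assumption for the arithmetic: GUEST_PAGE_SIZE = 0x1000, GUEST_PAGE_OFFSET_MASK = 0xfff), the boundary check works out like this:

    /* (GCPtrEff & 0xfff) must be <= 0x1000 - sizeof(uint32_t) = 0xffc. */
    RTGCPTR GCPtrA = 0x7000;    /* offset 0x000 <= 0xffc: fast TLB path        */
    RTGCPTR GCPtrB = 0x7ffc;    /* offset 0xffc <= 0xffc: last whole dword     */
    RTGCPTR GCPtrC = 0x7ffd;    /* offset 0xffd >  0xffc: the dword would      */
                                /* straddle two pages -> safe fallback         */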
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncs.cpp

r100732 → r100811

 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
 #define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
+    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)
+
+/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
     (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm)
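Note that the new IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS macro deliberately expands to the very same helper call as IEM_MC_CALC_RM_EFF_ADDR_THREADED_64: the effective-address arithmetic is identical either way. The distinct name simply gives the generated _64_FsGs variation its own macro to expand, keeping the segmented FS/GS code paths separable from the FLAT DS/ES/SS ones.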
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py

r100806 → r100811

     ksVariation_32_Addr16   = '_32_Addr16';     ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
     ksVariation_64          = '_64';            ##< 64-bit mode code.
+    ksVariation_64_FsGs     = '_64_FsGs';       ##< 64-bit mode code, with memory accesses via FS or GS.
     ksVariation_64_Addr32   = '_64_Addr32';     ##< 64-bit mode code, address size prefixed to 32-bit addressing.
     kasVariations           = (
…
         ksVariation_32_Addr16,
         ksVariation_64,
+        ksVariation_64_FsGs,
         ksVariation_64_Addr32,
     );
…
         ksVariation_32_Addr16,
         ksVariation_64,
+        ksVariation_64_FsGs,
         ksVariation_64_Addr32,
     );
…
         ksVariation_Default,
         ksVariation_64,
+        ksVariation_64_FsGs,
         ksVariation_32_Flat,
         ksVariation_32,
…
         ksVariation_32_Addr16:  '32-bit w/ address prefix (Addr16)',
         ksVariation_64:         '64-bit',
+        ksVariation_64_FsGs:    '64-bit with memory accessed via FS or GS',
         ksVariation_64_Addr32:  '64-bit w/ address prefix (Addr32)',
…
+    ## Maps memory related MCs to info for FLAT conversion.
+    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
+    ## segmentation checking for every memory access.  Only applied to accesses
+    ## via ES, DS and SS.  FS, GS and CS get the full segmentation treatment;
+    ## the latter (CS) is just to keep things simple (we could safely fetch via
+    ## it, but only in 64-bit mode could we safely write via it, IIRC).
+    kdMemMcToFlatInfo = {
+        'IEM_MC_FETCH_MEM_U8':                    (  1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
+        'IEM_MC_FETCH_MEM16_U8':                  (  1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
+        'IEM_MC_FETCH_MEM32_U8':                  (  1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
+        'IEM_MC_FETCH_MEM_U16':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
+        'IEM_MC_FETCH_MEM_U16_DISP':              (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
+        'IEM_MC_FETCH_MEM_I16':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
+        'IEM_MC_FETCH_MEM_U32':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
+        'IEM_MC_FETCH_MEM_U32_DISP':              (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
+        'IEM_MC_FETCH_MEM_I32':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
+        'IEM_MC_FETCH_MEM_U64':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
+        'IEM_MC_FETCH_MEM_U64_DISP':              (  1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
+        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
+        'IEM_MC_FETCH_MEM_I64':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
+        'IEM_MC_FETCH_MEM_R32':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
+        'IEM_MC_FETCH_MEM_R64':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
+        'IEM_MC_FETCH_MEM_R80':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
+        'IEM_MC_FETCH_MEM_D80':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
+        'IEM_MC_FETCH_MEM_U128':                  (  1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
+        'IEM_MC_FETCH_MEM_U128_NO_AC':            (  1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
+        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
+        'IEM_MC_FETCH_MEM_XMM':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
+        'IEM_MC_FETCH_MEM_XMM_NO_AC':             (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
+        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':         (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
+        'IEM_MC_FETCH_MEM_XMM_U32':               (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
+        'IEM_MC_FETCH_MEM_XMM_U64':               (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
+        'IEM_MC_FETCH_MEM_U256':                  (  1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
+        'IEM_MC_FETCH_MEM_U256_NO_AC':            (  1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
+        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
+        'IEM_MC_FETCH_MEM_YMM':                   (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
+        'IEM_MC_FETCH_MEM_YMM_NO_AC':             (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
+        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':         (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
+        'IEM_MC_FETCH_MEM_U8_ZX_U16':             (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
+        'IEM_MC_FETCH_MEM_U8_ZX_U32':             (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
+        'IEM_MC_FETCH_MEM_U8_ZX_U64':             (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
+        'IEM_MC_FETCH_MEM_U16_ZX_U32':            (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
+        'IEM_MC_FETCH_MEM_U16_ZX_U64':            (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
+        'IEM_MC_FETCH_MEM_U32_ZX_U64':            (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
+        'IEM_MC_FETCH_MEM_U8_SX_U16':             (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
+        'IEM_MC_FETCH_MEM_U8_SX_U32':             (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
+        'IEM_MC_FETCH_MEM_U8_SX_U64':             (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
+        'IEM_MC_FETCH_MEM_U16_SX_U32':            (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
+        'IEM_MC_FETCH_MEM_U16_SX_U64':            (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
+        'IEM_MC_FETCH_MEM_U32_SX_U64':            (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
+        'IEM_MC_STORE_MEM_U8':                    (  0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
+        'IEM_MC_STORE_MEM_U16':                   (  0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
+        'IEM_MC_STORE_MEM_U32':                   (  0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
+        'IEM_MC_STORE_MEM_U64':                   (  0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
+        'IEM_MC_STORE_MEM_U8_CONST':              (  0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
+        'IEM_MC_STORE_MEM_U16_CONST':             (  0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
+        'IEM_MC_STORE_MEM_U32_CONST':             (  0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
+        'IEM_MC_STORE_MEM_U64_CONST':             (  0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
+        'IEM_MC_STORE_MEM_U128':                  (  0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
+        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':        (  0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
+        'IEM_MC_STORE_MEM_U256':                  (  0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
+        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':        (  0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
+        'IEM_MC_PUSH_U16':                        ( -1, 'IEM_MC_FLAT_PUSH_U16' ),
+        'IEM_MC_PUSH_U32':                        ( -1, 'IEM_MC_FLAT_PUSH_U32' ),
+        'IEM_MC_PUSH_U32_SREG':                   ( -1, 'IEM_MC_FLAT_PUSH_U32_SREG' ),
+        'IEM_MC_PUSH_U64':                        ( -1, 'IEM_MC_FLAT_PUSH_U64' ),
+        'IEM_MC_POP_U16':                         ( -1, 'IEM_MC_FLAT_POP_U16' ),
+        'IEM_MC_POP_U32':                         ( -1, 'IEM_MC_FLAT_POP_U32' ),
+        'IEM_MC_POP_U64':                         ( -1, 'IEM_MC_FLAT_POP_U64' ),
+        'IEM_MC_POP_EX_U16':                      ( -1, 'IEM_MC_FLAT_POP_EX_U16' ),
+        'IEM_MC_POP_EX_U32':                      ( -1, 'IEM_MC_FLAT_POP_EX_U32' ),
+        'IEM_MC_POP_EX_U64':                      ( -1, 'IEM_MC_FLAT_POP_EX_U64' ),
+        'IEM_MC_MEM_MAP':                         (  2, 'IEM_MC_MEM_FLAT_MAP' ),
+        'IEM_MC_MEM_MAP_EX':                      (  3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
+    };
+
     def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
         """
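Each kdMemMcToFlatInfo value is a pair: the zero-based index of the effective-segment parameter to drop (-1 for the push/pop MCs, which take no segment parameter), and the FLAT replacement name. For IEM_MC_FETCH_MEM_U32, whose entry is (1, 'IEM_MC_FETCH_MEM_FLAT_U32'), the morph performed in the next hunk looks roughly like this (the operand names are illustrative):

    /* Before, in a segmented variation: parameter index 1 is the segment.    */
    IEM_MC_FETCH_MEM_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
    /* After, in the _64 or _32_Flat variation: index 1 popped, name swapped. */
    IEM_MC_FETCH_MEM_FLAT_U32(u32Value, GCPtrEffSrc);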
     
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
-                    if self.sVariation in (self.ksVariation_64, self.ksVariation_64_Addr32):
+                    if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation == self.ksVariation_16_Pre386:
…
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
+
+                # ... and in FLAT modes we must morph memory accesses into FLAT accesses ...
+                elif (    self.sVariation in (self.ksVariation_64, self.ksVariation_32_Flat,)
+                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
+                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
+                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP')
+                           or (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
+                           or oNewStmt.sName.startswith('IEM_MC_POP') )):
+                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
+                    if idxEffSeg != -1:
+                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
+                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
+                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
+                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
+                        oNewStmt.asParams.pop(idxEffSeg);
+                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # Process branches of conditionals recursively.
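The X86_SREG_ES allowance in the sanity check is presumably for MCs whose segment is architecturally fixed rather than prefix-selectable, such as the string instructions' ES-based destination accesses; those pass a literal X86_SREG_ES instead of the decoder's iEffSeg.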
     
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
-                    assert self.sVariation in (self.ksVariation_64, self.ksVariation_64_Addr32);
+                    assert self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32);
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t',  oStmt, sStdRef = 'bRmEx'));
…
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.
+
+            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
+            if (    self.sVariation in (self.ksVariation_64, self.ksVariation_32_Flat,)
+                and oStmt.sName in self.kdMemMcToFlatInfo
+                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
+                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
…
        sSwitchValue = 'pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK';
        if (   ThrdFnVar.ksVariation_64_Addr32 in dByVari
+            or ThrdFnVar.ksVariation_64_FsGs   in dByVari
            or ThrdFnVar.ksVariation_32_Addr16 in dByVari
            or ThrdFnVar.ksVariation_32_Flat   in dByVari
…
            sSwitchValue  = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
-            fSimple       = False;
+            # Accesses via FS, GS and CS go through the non-FLAT functions. (CS
+            # is not writable in 32-bit mode (at least), thus the penalty mode
+            # for any accesses via it (simpler this way).)
+            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
+            fSimple       = False;                                              # threaded functions.

        #
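The switch key therefore packs three pieces of state: the CPU mode (plus the FLAT/pre-386 mask bits) from fExec, the value 8 when the effective address size differs from the mode's natural one (an address-size prefix is in effect), and the value 16 when the effective segment is FS, GS or CS. A few sample 64-bit keys (illustrative):

    /* DS/ES/SS access, no 0x67 prefix:  IEMMODE_64BIT            -> _64        */
    /* FS, GS or CS override:            IEMMODE_64BIT | 16       -> _64_FsGs   */
    /* 0x67 prefix, any segment:         IEMMODE_64BIT | 8 (| 16) -> _64_Addr32 */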
     
            assert not fSimple;
            aoCases.extend([
-                Case('IEMMODE_64BIT',     ThrdFnVar.ksVariation_64),
-                Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
+                Case('IEMMODE_64BIT',          ThrdFnVar.ksVariation_64),
+                Case('IEMMODE_64BIT | 16',     ThrdFnVar.ksVariation_64_FsGs),
+                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
+                Case('IEMMODE_64BIT | 8',      ThrdFnVar.ksVariation_64_Addr32),
            ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
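Concretely, for a threaded function that has all the 64-bit variations, the emitted dispatcher has roughly this shape (a hand-written sketch of the generated C; the real generator emits calls to the per-variation threaded functions, whose bodies are abbreviated to comments here):

    switch (  (pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))
            | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)
            | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16))
    {
        case IEMMODE_64BIT:          /* the _64 variation: FLAT memory MCs      */ break;
        case IEMMODE_64BIT | 16:     /* the _64_FsGs variation: segmented MCs   */ break;
        case IEMMODE_64BIT | 8 | 16: /* fall thru: Addr32 is always segmented   */
        case IEMMODE_64BIT | 8:      /* the _64_Addr32 variation                */ break;
        /* ... 32-bit and 16-bit cases elided ... */
    }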
     
            assert not fSimple;
            aoCases.extend([
-                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
-                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
-                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
-                Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
+                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',          ThrdFnVar.ksVariation_32_Flat),
+                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16',     None), # fall thru
+                Case('IEMMODE_32BIT | 16',                                           None), # fall thru
+                Case('IEMMODE_32BIT',                                                ThrdFnVar.ksVariation_32),
+                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8',      None), # fall thru
+                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16', None), # fall thru
+                Case('IEMMODE_32BIT                                       | 8 | 16', None), # fall thru
+                Case('IEMMODE_32BIT                                       | 8',      ThrdFnVar.ksVariation_32_Addr16),
            ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
…
            assert not fSimple;
            aoCases.extend([
-                Case('IEMMODE_16BIT',     ThrdFnVar.ksVariation_16),
-                Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
+                Case('IEMMODE_16BIT | 16',     None), # fall thru
+                Case('IEMMODE_16BIT',          ThrdFnVar.ksVariation_16),
+                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
+                Case('IEMMODE_16BIT | 8',      ThrdFnVar.ksVariation_16_Addr32),
            ]);
        elif ThrdFnVar.ksVariation_16 in dByVari: