VirtualBox

Changeset 101722 in vbox


Timestamp: Nov 3, 2023 12:36:45 AM (18 months ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 159818
Message:

VMM/IEM: Identify the different IEM_MC_CALL_XXX styles and mark them using new IEM_CIMPL_F_CALLS_XXX flags in the python scripts. This is necessary because IEM_MC_CALL_CIMPL_X, IEM_MC_CALL_FPU_AIMPL_X, IEM_MC_CALL_MMX_AIMPL_X, and IEM_MC_CALL_SSE_AIMPL_X all have hidden parameters that need to be accounted for when recompiling to native code (for better register allocation for variables). Split the different cmpxchg16b AIMPL/CIMPL variations into separate MC blocks, as AIMPL and CIMPL calls cannot be mixed in the same block (also, in the CIMPL case, there would be unused tail code after the call). bugref:10371

Location: trunk/src/VBox/VMM
Files: 5 edited
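
The classification described in the commit message is a straightforward prefix check over IEM_MC_CALL_* statement names. A minimal standalone sketch of that mapping (the helper name is illustrative; the real logic lives in IEMAllThrdPython.py's analyzeCodeOperation and records the result in dsCImplFlags):

    def detectCallStyleFlag(sStmtName):
        """Hypothetical helper mirroring the prefix checks added in this changeset."""
        if not sStmtName.startswith('IEM_MC_CALL_'):
            return None;
        if sStmtName.startswith('IEM_MC_CALL_CIMPL_'):
            return 'IEM_CIMPL_F_CALLS_CIMPL';
        if (   sStmtName.startswith('IEM_MC_CALL_VOID_AIMPL_')
            or sStmtName.startswith('IEM_MC_CALL_AIMPL_')
            or sStmtName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
            return 'IEM_CIMPL_F_CALLS_AIMPL';
        if (   sStmtName.startswith('IEM_MC_CALL_SSE_AIMPL_')
            or sStmtName.startswith('IEM_MC_CALL_MMX_AIMPL_')
            or sStmtName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
            return 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE';
        raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (sStmtName,));

    # Example: detectCallStyleFlag('IEM_MC_CALL_SSE_AIMPL_3') returns
    # 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE', since the SSE/MMX/FPU assembly
    # helpers take a hidden X86FXSTATE pointer parameter.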

  • trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py

    r101694 r101722  
    19731973    'IEM_CIMPL_F_XCPT':                 ('IEM_CIMPL_F_BRANCH_INDIRECT', 'IEM_CIMPL_F_BRANCH_FAR',
    19741974                                         'IEM_CIMPL_F_MODE', 'IEM_CIMPL_F_RFLAGS', 'IEM_CIMPL_F_VMEXIT', ),
     1975    'IEM_CIMPL_F_CALLS_CIMPL':              (),
     1976    'IEM_CIMPL_F_CALLS_AIMPL':              (),
     1977    'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': (),
    19751978};
    19761979class McBlock(object):
     
    19781981    Microcode block (IEM_MC_BEGIN ... IEM_MC_END, IEM_MC_DEFER_TO_CIMPL_x_RET).
    19791982    """
     1983
     1984    ## @name Macro expansion types.
     1985    ## @{
     1986    kMacroExp_None    = 0;
     1987    kMacroExp_Entire  = 1; ##< Entire block (iBeginLine == iEndLine), original line may contain multiple blocks.
     1988    kMacroExp_Partial = 2; ##< Partial/mixed (cmpxchg16b), safe to assume single block.
     1989    ## @}
    19801990
    19811991    def __init__(self, sSrcFile, iBeginLine, offBeginLine, oFunction, iInFunction, cchIndent = None):
     
    20012011        ##< The raw lines the block is made up of.
    20022012        self.asLines      = []              # type: List[str]
     2013        ## Indicates whether the block includes macro expansion parts (kMacroExp_None,
     2014        ## kMacroExp_Entire, kMacroExp_Partial).
     2015        self.iMacroExp    = self.kMacroExp_None;
    20032016        ## IEM_MC_BEGIN: Argument count.
    20042017        self.cArgs        = -1;
     
    48704883
    48714884        #
    4872         # HACK ALERT! For blocks orginating from macro expansion the start and
     4885        # HACK ALERT! For blocks originating from macro expansion the start and
    48734886        #             end line will be the same, but the line has multiple
    48744887        #             newlines inside it.  So, we have to do some extra tricks
     
    48804893            if not asLines[0].strip().startswith('IEM_MC_BEGIN'):
    48814894                self.raiseError('IEM_MC_BEGIN is not the first word on the line');
     4895
     4896            # Hack alert! Detect mixed tail/head macros a la cmpxchg16b and split up the lines
     4897            #             so we can deal correctly with IEM_MC_END below and everything else.
     4898            for sLine in asLines:
     4899                cNewLines = sLine.count('\n');
     4900                assert cNewLines > 0;
     4901                if cNewLines > 1:
     4902                    asLines = self.extractLinesFromMacroExpansionLine(''.join(asLines),
     4903                                                                      self.oCurMcBlock.offBeginLine,
     4904                                                                        offEndStatementInLine
     4905                                                                      + sum(len(s) for s in asLines)
     4906                                                                      - len(asLines[-1]));
     4907                    self.oCurMcBlock.iMacroExp = McBlock.kMacroExp_Partial;
     4908                    break;
    48824909        else:
     4910            self.oCurMcBlock.iMacroExp = McBlock.kMacroExp_Entire;
    48834911            asLines = self.extractLinesFromMacroExpansionLine(self.asLines[self.iLine - 1],
    48844912                                                              self.oCurMcBlock.offBeginLine, offEndStatementInLine);
     
    52075235        # nested macro expansion, just to avoid lots of extra work.
    52085236        #
     5237        # There is only limited support for macros expanding to partial MC blocks.
     5238        #
    52095239        # Note! IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX and other macros someone making
    52105240        #       use of IEMOP_RAISE_INVALID_LOCK_PREFIX_RET() will be ignored here and
     
    52145244        #       Also, this way we don't produce lots of unnecessary threaded functions.
    52155245        #
    5216         if sBody.find("IEM_MC_BEGIN") < 0:
     5246        if sBody.find("IEM_MC_BEGIN") < 0 and sBody.find("IEM_MC_END") < 0:
    52175247            #self.debug('workerPreProcessDefine: irrelevant (%s: %s)' % (sName, sBody));
    52185248            return True;
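
For blocks that come from macro expansion, the parser above distinguishes full and partial expansion by counting embedded newlines: an expanded macro folds several statements into one physical source line. A simplified sketch of that classification, assuming the block's raw lines are available as a list of strings (the real code additionally re-splits such lines via extractLinesFromMacroExpansionLine and stores the result in McBlock.iMacroExp):

    kMacroExp_None    = 0;
    kMacroExp_Entire  = 1;  # whole block expanded from a single invocation line
    kMacroExp_Partial = 2;  # mix of expanded macro text and ordinary lines (cmpxchg16b)

    def classifyMacroExpansion(asLines, fSingleSourceLine):
        """Illustrative only; the real logic lives in IEMAllInstPython.py."""
        if fSingleSourceLine:
            return kMacroExp_Entire;        # IEM_MC_BEGIN..IEM_MC_END on one original line
        for sLine in asLines:
            if sLine.count('\n') > 1:       # several statements hidden in one physical line
                return kMacroExp_Partial;   # e.g. the BODY_CMPXCHG16B_HEAD / _TAIL split below
        return kMacroExp_None;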
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstTwoByte0f.cpp.h

    r101484 r101722  
    1242812428    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fCmpXchg16b)
    1242912429    {
    12430         IEM_MC_BEGIN(4, 3, IEM_MC_F_64BIT, 0);
    12431         IEM_MC_ARG(PRTUINT128U, pu128MemDst,     0);
    12432         IEM_MC_ARG(PRTUINT128U, pu128RaxRdx,     1);
    12433         IEM_MC_ARG(PRTUINT128U, pu128RbxRcx,     2);
    12434         IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 3);
    12435         IEM_MC_LOCAL(RTUINT128U, u128RaxRdx);
    12436         IEM_MC_LOCAL(RTUINT128U, u128RbxRcx);
    12437         IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    12438 
    12439         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    12440         IEMOP_HLP_DONE_DECODING();
    12441         IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(GCPtrEffDst, 16);
    12442         IEM_MC_MEM_MAP(pu128MemDst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
    12443 
    12444         IEM_MC_FETCH_GREG_U64(u128RaxRdx.s.Lo, X86_GREG_xAX);
    12445         IEM_MC_FETCH_GREG_U64(u128RaxRdx.s.Hi, X86_GREG_xDX);
    12446         IEM_MC_REF_LOCAL(pu128RaxRdx, u128RaxRdx);
    12447 
    12448         IEM_MC_FETCH_GREG_U64(u128RbxRcx.s.Lo, X86_GREG_xBX);
    12449         IEM_MC_FETCH_GREG_U64(u128RbxRcx.s.Hi, X86_GREG_xCX);
    12450         IEM_MC_REF_LOCAL(pu128RbxRcx, u128RbxRcx);
    12451 
    12452         IEM_MC_FETCH_EFLAGS(EFlags);
     12430        /*
     12431         * This is hairy, very hairy macro fun.   We're walking a fine line
     12432         * here to make the code parsable by IEMAllInstPython.py and fit into
     12433         * the patterns IEMAllThrdPython.py requires for the code morphing.
     12434         */
     12435#define BODY_CMPXCHG16B_HEAD \
     12436            IEM_MC_BEGIN(4, 3, IEM_MC_F_64BIT, 0); \
     12437            IEM_MC_ARG(PRTUINT128U, pu128MemDst,     0); \
     12438            IEM_MC_ARG(PRTUINT128U, pu128RaxRdx,     1); \
     12439            IEM_MC_ARG(PRTUINT128U, pu128RbxRcx,     2); \
     12440            IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 3); \
     12441            IEM_MC_LOCAL(RTUINT128U, u128RaxRdx); \
     12442            IEM_MC_LOCAL(RTUINT128U, u128RbxRcx); \
     12443            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
     12444            \
     12445            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
     12446            IEMOP_HLP_DONE_DECODING(); \
     12447            IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(GCPtrEffDst, 16); \
     12448            IEM_MC_MEM_MAP(pu128MemDst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); \
     12449            \
     12450            IEM_MC_FETCH_GREG_U64(u128RaxRdx.s.Lo, X86_GREG_xAX); \
     12451            IEM_MC_FETCH_GREG_U64(u128RaxRdx.s.Hi, X86_GREG_xDX); \
     12452            IEM_MC_REF_LOCAL(pu128RaxRdx, u128RaxRdx); \
     12453            \
     12454            IEM_MC_FETCH_GREG_U64(u128RbxRcx.s.Lo, X86_GREG_xBX); \
     12455            IEM_MC_FETCH_GREG_U64(u128RbxRcx.s.Hi, X86_GREG_xCX); \
     12456            IEM_MC_REF_LOCAL(pu128RbxRcx, u128RbxRcx); \
     12457            \
     12458            IEM_MC_FETCH_EFLAGS(EFlags)
     12459
     12460#define BODY_CMPXCHG16B_TAIL \
     12461            IEM_MC_MEM_COMMIT_AND_UNMAP(pu128MemDst, IEM_ACCESS_DATA_RW); \
     12462            IEM_MC_COMMIT_EFLAGS(EFlags); \
     12463            IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF) { \
     12464                IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u128RaxRdx.s.Lo); \
     12465                IEM_MC_STORE_GREG_U64(X86_GREG_xDX, u128RaxRdx.s.Hi); \
     12466            } IEM_MC_ENDIF(); \
     12467            IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     12468            IEM_MC_END()
    1245312469
    1245412470#ifdef RT_ARCH_AMD64 /* some code duplication here because IEMAllInstPython.py cannot parse if/else/#if spaghetti. */
     
    1245712473            if (   !(pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)
    1245812474                && (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
     12475            {
     12476                BODY_CMPXCHG16B_HEAD;
    1245912477                IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12478                BODY_CMPXCHG16B_TAIL;
     12479            }
    1246012480            else
     12481            {
     12482                BODY_CMPXCHG16B_HEAD;
    1246112483                IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12484                BODY_CMPXCHG16B_TAIL;
     12485            }
    1246212486        }
    1246312487        else
    1246412488        {   /* (see comments in #else case below) */
    1246512489            if (pVCpu->CTX_SUFF(pVM)->cCpus == 1)
     12490            {
     12491                BODY_CMPXCHG16B_HEAD;
    1246612492                IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12493                BODY_CMPXCHG16B_TAIL;
     12494            }
    1246712495            else
     12496            {
     12497                BODY_CMPXCHG16B_HEAD;
    1246812498                IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS,
    1246912499                                    iemCImpl_cmpxchg16b_fallback_rendezvous, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12500                IEM_MC_END();
     12501            }
    1247012502        }
    1247112503
     
    1247312505        /** @todo may require fallback for unaligned accesses... */
    1247412506        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
     12507        {
     12508            BODY_CMPXCHG16B_HEAD;
    1247512509            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12510            BODY_CMPXCHG16B_TAIL;
     12511        }
    1247612512        else
     12513        {
     12514            BODY_CMPXCHG16B_HEAD;
    1247712515            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12516            BODY_CMPXCHG16B_TAIL;
     12517        }
    1247812518
    1247912519#else
     
    1248312523                 but to use a rendezvous callback here.  Sigh. */
    1248412524        if (pVCpu->CTX_SUFF(pVM)->cCpus == 1)
     12525        {
     12526            BODY_CMPXCHG16B_HEAD;
    1248512527            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12528            BODY_CMPXCHG16B_TAIL;
     12529        }
    1248612530        else
    1248712531        {
    12488             IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS,
    12489                                 iemCImpl_cmpxchg16b_fallback_rendezvous, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12532            BODY_CMPXCHG16B_HEAD;
     12533            IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS, iemCImpl_cmpxchg16b_fallback_rendezvous,
     12534                                pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12535            IEM_MC_END();
    1249012536            /* Does not get here, tail code is duplicated in iemCImpl_cmpxchg16b_fallback_rendezvous. */
    1249112537        }
    1249212538#endif
    1249312539
    12494         IEM_MC_MEM_COMMIT_AND_UNMAP(pu128MemDst, IEM_ACCESS_DATA_RW);
    12495         IEM_MC_COMMIT_EFLAGS(EFlags);
    12496         IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF) {
    12497             IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u128RaxRdx.s.Lo);
    12498             IEM_MC_STORE_GREG_U64(X86_GREG_xDX, u128RaxRdx.s.Hi);
    12499         } IEM_MC_ENDIF();
    12500         IEM_MC_ADVANCE_RIP_AND_FINISH();
    12501 
    12502         IEM_MC_END();
     12540#undef BODY_CMPXCHG16B
    1250312541    }
    1250412542    Log(("cmpxchg16b -> #UD\n"));
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py

    r101706 r101722  
    3838
    3939# Standard python imports:
    40 #import sys;
     40import copy;
    4141
    4242# Our python imports:
     
    196196        Returns the native recompiler function body for this threaded variant.
    197197        """
    198         aoStmts = self.oVariation.aoStmtsForThreadedFunction # type: list(McStmt)
     198        # Take the threaded function statement list and add detected
     199        # IEM_CIMPL_F_XXX flags to the IEM_MC_BEGIN statement.
     200        aoStmts = list(self.oVariation.aoStmtsForThreadedFunction) # type: list(McStmt)
     201        for iStmt, oStmt in enumerate(aoStmts):
     202            if oStmt.sName == 'IEM_MC_BEGIN' and self.oVariation.oParent.dsCImplFlags:
     203                oNewStmt = copy.deepcopy(oStmt);
     204                oNewStmt.asParams[3] = ' | '.join(sorted(self.oVariation.oParent.dsCImplFlags.keys()));
     205                aoStmts[iStmt] = oNewStmt;
     206
    199207        return iai.McStmt.renderCodeForList(aoStmts, cchIndent);
    200208
     
    272280                g_dUnsupportedMcStmtLastOneStats[sStmt] = [oVariation,];
    273281
    274     if (    len(dUnsupportedStmts) == 1
     282    if (    len(dUnsupportedStmts) in (1,2)
    275283        and iai.McStmt.findStmtByNames(aoStmts,
    276284                                       { 'IEM_MC_LOCAL': 1, 'IEM_MC_LOCAL_CONST': 1, 'IEM_MC_ARG': 1, 'IEM_MC_ARG_CONST': 1,
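
The IEMAllN8vePython.py hunk above injects the detected flags into the generated native-recompiler input by cloning the IEM_MC_BEGIN statement and rewriting its fourth parameter. A hedged sketch of that pattern, with the statement objects reduced to the two attributes actually touched (sName, asParams):

    import copy;

    def patchBeginStmtFlags(aoStmts, dsCImplFlags):
        """Illustrative only; mirrors the list-copy + deepcopy pattern in the hunk above."""
        aoStmts = list(aoStmts);                    # don't modify the shared statement list
        for iStmt, oStmt in enumerate(aoStmts):
            if oStmt.sName == 'IEM_MC_BEGIN' and dsCImplFlags:
                oNewStmt = copy.deepcopy(oStmt);    # leave the original statement untouched
                oNewStmt.asParams[3] = ' | '.join(sorted(dsCImplFlags.keys()));
                aoStmts[iStmt] = oNewStmt;
        return aoStmts;

    # Usage sketch (names from the script, dict contents illustrative):
    #   aoPatched = patchBeginStmtFlags(oVariation.aoStmtsForThreadedFunction,
    #                                   {'IEM_CIMPL_F_CALLS_AIMPL': True});

Deep-copying only the IEM_MC_BEGIN statement keeps the per-variation flag set out of the shared aoStmtsForThreadedFunction list.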
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py

    r101704 r101722  
    419419        'IEM_CIMPL_F_END_TB':                       True,
    420420        'IEM_CIMPL_F_XCPT':                         True,
     421        'IEM_CIMPL_F_CALLS_CIMPL':                  False,
     422        'IEM_CIMPL_F_CALLS_AIMPL':                  False,
     423        'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE':     False,
    421424    };
    422425
     
    13311334                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
    13321335
     1336            # Check for CIMPL and AIMPL calls.
     1337            if oStmt.sName.startswith('IEM_MC_CALL_'):
     1338                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
     1339                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
     1340                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
     1341                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
     1342                      or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
     1343                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
     1344                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
     1345                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
     1346                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
     1347                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
     1348                else:
     1349                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
     1350
    13331351            # Process branches of conditionals recursively.
    13341352            if isinstance(oStmt, iai.McStmtCond):
     
    13611379        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
    13621380        self.analyzeCodeOperation(aoStmts);
     1381        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
     1382             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
     1383             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
     1384            self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');
    13631385
    13641386        # Create variations as needed.
     
    18241846            if ian.g_dUnsupportedMcStmtLastOneVarStats:
    18251847                asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneVarStats, reverse = True,
    1826                                    key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneVarStats[sSortKey]))[:10];
     1848                                   key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneVarStats[sSortKey]))[:16];
    18271849                print('todo:', file = sys.stderr);
    1828                 print('todo: Top %s variations with variables and one unsupported statement dependency:' % (len(asTopKeys),),
     1850                print('todo: Top %s variations with variables and 1-2 unsupported statement dependency:' % (len(asTopKeys),),
    18291851                      file = sys.stderr);
    18301852                cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
     
    18611883                cbVars = 0;
    18621884                for oVar in oThreadedFunction.oMcBlock.aoLocals:
    1863                      cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
     1885                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
    18641886                cbMaxVars        = max(cbMaxVars, cbVars);
    18651887                cbMaxArgs        = max(cbMaxArgs, cbArgs);
     
    23662388                #
    23672389                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
    2368                     assert sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1, 'sLine="%s"' % (sLine,);
     2390                    assert (   (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
     2391                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kMacroExp_Partial), 'sLine="%s"' % (sLine,);
    23692392                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
    23702393                    sModified = oThreadedFunction.generateInputCode().strip();
     
    23732396                    iLine = oThreadedFunction.oMcBlock.iEndLine;
    23742397                    sLine = oParser.asLines[iLine - 1];
    2375                     assert sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1 or len(oThreadedFunction.oMcBlock.aoStmts) == 1;
     2398                    assert (   sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
     2399                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
     2400                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kMacroExp_Partial);
    23762401                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
    23772402
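
Because the three call styles have different hidden parameters, a block may carry at most one of the new flags; the sanity check added above boils down to counting how many are present. A minimal sketch, assuming dsCImplFlags is the dict of detected flags:

    def checkCallFlagsNotMixed(dsCImplFlags):
        """Illustrative restatement of the raiseProblem() check in the hunk above."""
        cStyles = (  ('IEM_CIMPL_F_CALLS_CIMPL'              in dsCImplFlags)
                   + ('IEM_CIMPL_F_CALLS_AIMPL'              in dsCImplFlags)
                   + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in dsCImplFlags));
        if cStyles > 1:
            raise Exception('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');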
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r101682 r101722  
    622622#define IEM_CIMPL_F_XCPT \
    623623    (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
     624
     625/** The block calls a C-implementation instruction function with two implicit arguments.
     626 * Mutually exclusive with IEM_CIMPL_F_CALLS_AIMPL and
     627 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
     628 * @note The python scripts will add this if missing.  */
     629#define IEM_CIMPL_F_CALLS_CIMPL                 RT_BIT_32(16)
     630/** The block calls an ASM-implementation instruction function.
     631 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and
     632 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
     633 * @note The python scripts will add this if missing.  */
     634#define IEM_CIMPL_F_CALLS_AIMPL                 RT_BIT_32(17)
     635/** The block calls an ASM-implementation instruction function with an implicit
     636 * X86FXSTATE pointer argument.
     637 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and IEM_CIMPL_F_CALLS_AIMPL.
     638 * @note The python scripts will add this if missing.  */
     639#define IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE    RT_BIT_32(18)
    624640/** @} */
    625641