VirtualBox

Changeset 15416 in vbox


Ignore: (whitespace-change filter — none applied)
Timestamp:
Dec 13, 2008 5:31:06 AM (16 years ago)
Author:
vboxsync
Message:

CPUM: hybrid 32-bit kernel FPU mess.

Location:
trunk/src/VBox/VMM
Files:
6 edited

Legend:

  • Unmodified
  • Added
  • Removed
  • trunk/src/VBox/VMM/CPUMInternal.h

    r15414 r15416  
    377377__BEGIN_DECLS
    378378
    379 DECLASM(int)      CPUMHandleLazyFPUAsm(PCPUMCPU pCPUM);
     379DECLASM(int)      cpumHandleLazyFPUAsm(PCPUMCPU pCPUM);
    380380
    381381#ifdef IN_RING0
    382 DECLASM(int)      CPUMR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
    383 DECLASM(int)      CPUMR0RestoreHostFPUState(PCPUMCPU pCPUM);
    384 DECLASM(void)     CPUMR0LoadFPU(PCPUMCTX pCtx);
    385 DECLASM(void)     CPUMR0SaveFPU(PCPUMCTX pCtx);
    386 DECLASM(void)     CPUMR0LoadXMM(PCPUMCTX pCtx);
    387 DECLASM(void)     CPUMR0SaveXMM(PCPUMCTX pCtx);
    388 DECLASM(void)     CPUMR0SetFCW(uint16_t u16FCW);
    389 DECLASM(uint16_t) CPUMR0GetFCW();
    390 DECLASM(void)     CPUMR0SetMXCSR(uint32_t u32MXCSR);
    391 DECLASM(uint32_t) CPUMR0GetMXCSR();
     382DECLASM(int)      cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
     383DECLASM(int)      cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
     384DECLASM(void)     cpumR0LoadFPU(PCPUMCTX pCtx);
     385DECLASM(void)     cpumR0SaveFPU(PCPUMCTX pCtx);
     386DECLASM(void)     cpumR0LoadXMM(PCPUMCTX pCtx);
     387DECLASM(void)     cpumR0SaveXMM(PCPUMCTX pCtx);
     388DECLASM(void)     cpumR0SetFCW(uint16_t u16FCW);
     389DECLASM(uint16_t) cpumR0GetFCW(void);
     390DECLASM(void)     cpumR0SetMXCSR(uint32_t u32MXCSR);
     391DECLASM(uint32_t) cpumR0GetMXCSR(void);
    392392#endif
    393393
  • trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm

    r14870 r15416  
    5959;
    6060align 16
    61 BEGINPROC   CPUMHandleLazyFPUAsm
     61BEGINPROC   cpumHandleLazyFPUAsm
    6262    ;
    6363    ; Figure out what to do.
     
    199199    mov     eax, VINF_EM_RAW_GUEST_TRAP
    200200    ret
    201 ENDPROC     CPUMHandleLazyFPUAsm
    202 
    203 
     201ENDPROC     cpumHandleLazyFPUAsm
     202
     203
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r15390 r15416  
    15171517            break;
    15181518        }
    1519        
     1519
    15201520        default:
    15211521            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
     
    20832083VMMDECL(int) CPUMHandleLazyFPU(PVM pVM, PVMCPU pVCpu)
    20842084{
    2085     return CPUMHandleLazyFPUAsm(&pVCpu->cpum.s);
     2085    return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
    20862086}
    20872087
  • trunk/src/VBox/VMM/VMMGC/TRPMGCHandlersA.asm

    r11473 r15416  
    3939extern IMPNAME(g_VM)                    ; are a bit confusing at first... :-)
    4040extern NAME(CPUMGCRestoreInt)
    41 extern NAME(CPUMHandleLazyFPUAsm)
     41extern NAME(cpumHandleLazyFPUAsm)
    4242extern NAME(CPUMHyperSetCtxCore)
    4343extern NAME(trpmGCTrapInGeneric)
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r15414 r15416  
    178178    {
    179179#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
    180         uint64_t oldMsrEFERHost;
     180        uint64_t oldMsrEFERHost = 0;
    181181        uint32_t oldCR0 = ASMGetCR0();
    182182
     
    217217            pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();
    218218
    219         CPUMR0LoadFPU(pCtx);
     219        cpumR0LoadFPU(pCtx);
    220220
    221221        /*
     
    232232            {
    233233                /* fxrstor doesn't restore the XMM state! */
    234                 CPUMR0LoadXMM(pCtx);
     234                cpumR0LoadXMM(pCtx);
    235235                pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
    236236            }
     
    264264            HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
    265265
    266         CPUMR0RestoreHostFPUState(&pVCpu->cpum.s);
     266        cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
    267267    }
    268268    else
     
    270270    {
    271271#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
    272         uint64_t oldMsrEFERHost;
     272        uint64_t oldMsrEFERHost = 0;
    273273
    274274        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
     
    278278            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
    279279        }
    280         CPUMR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
     280        cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
    281281
    282282        /* Restore EFER MSR */
     
    285285
    286286#else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    287         CPUMR0SaveFPU(pCtx);
     287        cpumR0SaveFPU(pCtx);
    288288        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    289289        {
    290290            /* fxsave doesn't save the XMM state! */
    291             CPUMR0SaveXMM(pCtx);
     291            cpumR0SaveXMM(pCtx);
    292292        }
    293293
     
    296296         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
    297297         */
    298         CPUMR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
     298        cpumR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
    299299        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
    300             CPUMR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
     300            cpumR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
    301301#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    302302    }
  • trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm

    r14978 r15416  
    3535%endif
    3636
     37
     38;*******************************************************************************
     39;* External Symbols                                                            *
     40;*******************************************************************************
     41%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     42extern NAME(SUPR0AbsIs64bit)
     43extern NAME(SUPR0Abs64bitKernelCS)
     44extern NAME(SUPR0Abs64bitKernelSS)
     45extern NAME(SUPR0Abs64bitKernelDS)
     46extern NAME(SUPR0AbsKernelCS)
     47%endif
     48
     49
     50;*******************************************************************************
     51;*  Global Variables                                                           *
     52;*******************************************************************************
     53%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     54BEGINDATA
     55;;
     56; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
     57; needing to clobber a register. (This trick doesn't quite work for PE btw.
     58; but that's not relevant atm.)
     59GLOBALNAME g_fCPUMIs64bitHost
     60    dd  NAME(SUPR0AbsIs64bit)
     61%endif
     62
     63
    3764BEGINCODE
    3865
     
    4572;
    4673align 16
    47 BEGINPROC CPUMR0SaveGuestRestoreHostFPUState
     74BEGINPROC cpumR0SaveGuestRestoreHostFPUState
    4875%ifdef RT_ARCH_AMD64
    4976 %ifdef RT_OS_WINDOWS
     
    5986    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    6087    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    61     jz short gth_fpu_no
     88    jz short .fpu_not_used
    6289
    6390    mov     xAX, cr0
     
    6693    mov     cr0, xAX
    6794
     95%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     96    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
     97    jz      .legacy_mode
     98    db      0xea                        ; jmp far .sixtyfourbit_mode
     99    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
     100.legacy_mode:
     101%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     102
    68103    fxsave  [xDX + CPUMCPU.Guest.fpu]
    69104    fxrstor [xDX + CPUMCPU.Host.fpu]
    70105
     106.done:
    71107    mov     cr0, xCX                    ; and restore old CR0 again
    72108    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
    73 gth_fpu_no:
     109.fpu_not_used:
    74110    xor     eax, eax
    75111    ret
    76 ENDPROC   CPUMR0SaveGuestRestoreHostFPUState
     112
     113%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
     114ALIGNCODE(16)
     115BITS 64
     116.sixtyfourbit_mode:
     117    and     edx, 0ffffffffh
     118    fxsave  [rdx + CPUMCPU.Guest.fpu]
     119    fxrstor [rdx + CPUMCPU.Host.fpu]
     120    jmp far [.fpret wrt rip]
     121.fpret:                                 ; 16:32 Pointer to .the_end.
     122    dd      .done, NAME(SUPR0AbsKernelCS)
     123BITS 32
     124%endif
     125ENDPROC   cpumR0SaveGuestRestoreHostFPUState
    77126
    78127;;
     
    83132;
    84133align 16
    85 BEGINPROC CPUMR0RestoreHostFPUState
     134BEGINPROC cpumR0RestoreHostFPUState
    86135%ifdef RT_ARCH_AMD64
    87136 %ifdef RT_OS_WINDOWS
     
    97146    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    98147    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    99     jz short gth_fpu_no_2
     148    jz short .fpu_not_used
    100149
    101150    mov     xAX, cr0
     
    104153    mov     cr0, xAX
    105154
     155%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     156    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
     157    jz      .legacy_mode
     158    db      0xea                        ; jmp far .sixtyfourbit_mode
     159    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
     160.legacy_mode:
     161%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     162
    106163    fxrstor [xDX + CPUMCPU.Host.fpu]
    107164
     165.done:
    108166    mov     cr0, xCX                    ; and restore old CR0 again
    109167    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
    110 gth_fpu_no_2:
     168.fpu_not_used:
    111169    xor     eax, eax
    112170    ret
    113 ENDPROC   CPUMR0RestoreHostFPUState
     171
     172%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
     173ALIGNCODE(16)
     174BITS 64
     175.sixtyfourbit_mode:
     176    and     edx, 0ffffffffh
     177    fxrstor [rdx + CPUMCPU.Host.fpu]
     178    jmp far [.fpret wrt rip]
     179.fpret:                                 ; 16:32 Pointer to .the_end.
     180    dd      .done, NAME(SUPR0AbsKernelCS)
     181BITS 32
     182%endif
     183ENDPROC   cpumR0RestoreHostFPUState
     184
    114185
    115186;;
     
    129200    mov     xDX, dword [esp + 4]
    130201%endif
     202%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     203    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
     204    jz      .legacy_mode
     205    db      0xea                        ; jmp far .sixtyfourbit_mode
     206    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
     207.legacy_mode:
     208%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     209
    131210    fxrstor [xDX + CPUMCTX.fpu]
    132     ret
     211.done:
     212    ret
     213
     214%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
     215ALIGNCODE(16)
     216BITS 64
     217.sixtyfourbit_mode:
     218    and     edx, 0ffffffffh
     219    fxrstor [rdx + CPUMCTX.fpu]
     220    jmp far [.fpret wrt rip]
     221.fpret:                                 ; 16:32 Pointer to .the_end.
     222    dd      .done, NAME(SUPR0AbsKernelCS)
     223BITS 32
     224%endif
    133225ENDPROC     CPUMLoadFPU
    134226
     
    150242    mov     xDX, dword [esp + 4]
    151243%endif
     244%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     245    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
     246    jz      .legacy_mode
     247    db      0xea                        ; jmp far .sixtyfourbit_mode
     248    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
     249.legacy_mode:
     250%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    152251    fxsave  [xDX + CPUMCTX.fpu]
    153     ret
     252.done:
     253    ret
     254
     255%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
     256ALIGNCODE(16)
     257BITS 64
     258.sixtyfourbit_mode:
     259    and     edx, 0ffffffffh
     260    fxsave  [rdx + CPUMCTX.fpu]
     261    jmp far [.fpret wrt rip]
     262.fpret:                                 ; 16:32 Pointer to .the_end.
     263    dd      .done, NAME(SUPR0AbsKernelCS)
     264BITS 32
     265%endif
    154266ENDPROC CPUMSaveFPU
    155267
     
    171283    mov     xDX, dword [esp + 4]
    172284%endif
     285%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     286    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
     287    jz      .legacy_mode
     288    db      0xea                        ; jmp far .sixtyfourbit_mode
     289    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
     290.legacy_mode:
     291%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     292
    173293    movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    174294    movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
     
    182302%ifdef RT_ARCH_AMD64
    183303    test qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    184     jz CPUMLoadXMM_done
     304    jz .done
    185305
    186306    movdqa  xmm8, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
     
    192312    movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    193313    movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
    194 CPUMLoadXMM_done:
    195 %endif
    196 
    197     ret
     314%endif
     315.done:
     316
     317    ret
     318
     319%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
     320ALIGNCODE(16)
     321BITS 64
     322.sixtyfourbit_mode:
     323    and     edx, 0ffffffffh
     324
     325    movdqa  xmm0, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
     326    movdqa  xmm1, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
     327    movdqa  xmm2, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
     328    movdqa  xmm3, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
     329    movdqa  xmm4, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
     330    movdqa  xmm5, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
     331    movdqa  xmm6, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
     332    movdqa  xmm7, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]
     333
     334    test qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
     335    jz .sixtyfourbit_done
     336
     337    movdqa  xmm8,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
     338    movdqa  xmm9,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
     339    movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
     340    movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
     341    movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
     342    movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
     343    movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
     344    movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
     345.sixtyfourbit_done:
     346    jmp far [.fpret wrt rip]
     347.fpret:                                 ; 16:32 Pointer to .the_end.
     348    dd      .done, NAME(SUPR0AbsKernelCS)
     349BITS 32
     350%endif
    198351ENDPROC     CPUMLoadXMM
    199352
     
    215368    mov     xDX, dword [esp + 4]
    216369%endif
     370%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     371    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
     372    jz      .legacy_mode
     373    db      0xea                        ; jmp far .sixtyfourbit_mode
     374    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
     375.legacy_mode:
     376%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     377
    217378    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    218379    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
     
    226387%ifdef RT_ARCH_AMD64
    227388    test qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    228     jz CPUMSaveXMM_done
     389    jz .done
    229390
    230391    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
     
    237398    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
    238399
    239 CPUMSaveXMM_done:
    240 %endif
    241     ret
     400%endif
     401.done:
     402    ret
     403
     404%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
     405ALIGNCODE(16)
     406BITS 64
     407.sixtyfourbit_mode:
     408    and     edx, 0ffffffffh
     409
     410    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
     411    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
     412    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
     413    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
     414    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
     415    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
     416    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
     417    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7
     418
     419    test qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
     420    jz .sixtyfourbit_done
     421
     422    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
     423    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
     424    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
     425    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
     426    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
     427    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
     428    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
     429    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
     430
     431.sixtyfourbit_done:
     432    jmp far [.fpret wrt rip]
     433.fpret:                                 ; 16:32 Pointer to .the_end.
     434    dd      .done, NAME(SUPR0AbsKernelCS)
     435BITS 32
     436%endif
     437
    242438ENDPROC     CPUMSaveXMM
    243439
     
    248444; @param  u16FCW    x86:[esp+4] GCC:rdi MSC:rcx     New FPU control word
    249445align 16
    250 BEGINPROC CPUMR0SetFCW
     446BEGINPROC cpumR0SetFCW
    251447%ifdef RT_ARCH_AMD64
    252448 %ifdef RT_OS_WINDOWS
     
    263459    pop     xAX
    264460    ret
    265 ENDPROC   CPUMR0SetFCW
     461ENDPROC   cpumR0SetFCW
    266462
    267463
     
    270466;
    271467align 16
    272 BEGINPROC CPUMR0GetFCW
     468BEGINPROC cpumR0GetFCW
    273469    fnstcw  [xSP - 8]
    274470    mov     ax, word [xSP - 8]
    275471    ret
    276 ENDPROC   CPUMR0GetFCW
     472ENDPROC   cpumR0GetFCW
    277473
    278474
     
    282478; @param  u32MXCSR    x86:[esp+4] GCC:rdi MSC:rcx     New MXCSR
    283479align 16
    284 BEGINPROC CPUMR0SetMXCSR
     480BEGINPROC cpumR0SetMXCSR
    285481%ifdef RT_ARCH_AMD64
    286482 %ifdef RT_OS_WINDOWS
     
    296492    pop     xAX
    297493    ret
    298 ENDPROC   CPUMR0SetMXCSR
     494ENDPROC   cpumR0SetMXCSR
    299495
    300496
     
    303499;
    304500align 16
    305 BEGINPROC CPUMR0GetMXCSR
     501BEGINPROC cpumR0GetMXCSR
    306502    stmxcsr [xSP - 8]
    307503    mov     eax, dword [xSP - 8]
    308504    ret
    309 ENDPROC   CPUMR0GetMXCSR
     505ENDPROC   cpumR0GetMXCSR
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle · Support · Privacy / Do Not Sell My Info · Terms of Use · Trademark Policy · Automated Access Etiquette