VirtualBox

Changeset 61348 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
May 31, 2016 5:59:34 PM (9 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
107643
Message:

CPUM,VMM: Touch the FPU state before doing HM on all platforms which allows us to do so (VMM_R0_TOUCH_FPU, see Makefile.kmk). No special treatment of win.amd64 (could save a CR0 read, maybe). Cleaned up the fix from this morning.

Location:
trunk/src/VBox/VMM
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/Makefile.kmk

    r61339 r61348  
    568568  VMMR0_DEFS    += VMM_R0_SWITCH_STACK
    569569 endif
     570 if1of ($(KBUILD_TARGET), darwin linux win)
     571  VMMR0_DEFS    += VMM_R0_TOUCH_FPU
     572 endif
    570573 VMMR0_DEFS.darwin = VMM_R0_SWITCH_STACK
    571574 VMMR0_DEFS.win.amd64  = VBOX_WITH_KERNEL_USING_XMM
  • trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm

    r61317 r61348  
    4949SEH64_END_PROLOGUE
    5050
    51 %ifdef CPUM_CAN_USE_FPU_IN_R0
    52         movaps  xmm0, xmm0
     51%ifdef VMM_R0_TOUCH_FPU
     52        movdqa  xmm0, xmm0              ; hope this is harmless.
    5353%endif
    5454
     
    6060
    6161
     62%ifdef VMM_R0_TOUCH_FPU
     63;;
     64; Touches the host FPU state.
     65;
     66; @uses nothing (well, maybe cr0)
     67;
     68ALIGNCODE(16)
     69BEGINPROC CPUMR0TouchHostFpu
     70        push    xBP
     71        SEH64_PUSH_xBP
     72        mov     xBP, xSP
     73        SEH64_SET_FRAME_xBP 0
     74SEH64_END_PROLOGUE
     75
     76        movdqa  xmm0, xmm0              ; Hope this is harmless.
     77
     78        leave
     79        ret
     80ENDPROC   CPUMR0TouchHostFpu
     81%endif ; VMM_R0_TOUCH_FPU
     82
     83
    6284;;
    6385; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state.
     
    6688; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
    6789;
    68 align 16
     90ALIGNCODE(16)
    6991BEGINPROC cpumR0SaveHostRestoreGuestFPUState
    7092        push    xBP
     
    102124        jnz     .already_saved_host
    103125
    104 %ifndef CPUM_CAN_USE_FPU_IN_R0
    105         ; On systems where the kernel doesn't necessarily allow us to use the FPU
    106         ; in ring-0 context, we have to disable FPU traps before doing fxsave/xsave
    107         ; here.  (xCX is 0 if no CR0 was necessary.)  We leave it like that so IEM
    108         ; can use the FPU/SSE/AVX host CPU features directly.
    109         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX
    110         mov     [pCpumCpu + CPUMCPU.Host.cr0Fpu], xCX
    111         ;; @todo What about XCR0?
    112 %endif
     126        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC xCX, xAX, pCpumCpu ; xCX is the return value for VT-x; xAX is scratch.
    113127
    114128        CPUMR0_SAVE_HOST
     
    157171        popf
    158172
    159 %ifndef CPUM_CAN_USE_FPU_IN_R0
    160         test    ecx, ecx
    161         jnz     .modified_cr0
    162 %endif
    163         xor     eax, eax
     173        mov     eax, ecx
    164174.return:
    165175%ifdef RT_ARCH_X86
     
    169179        leave
    170180        ret
    171 
    172 %ifndef CPUM_CAN_USE_FPU_IN_R0
    173 .modified_cr0:
    174         mov     eax, VINF_CPUM_HOST_CR0_MODIFIED
    175         jmp     .return
    176 %endif
    177181ENDPROC   cpumR0SaveHostRestoreGuestFPUState
    178182
     
    183187; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
    184188;
    185 align 16
     189ALIGNCODE(16)
    186190BEGINPROC cpumR0SaveGuestRestoreHostFPUState
    187191        push    xBP
     
    264268        CPUMR0_LOAD_HOST
    265269
    266 %ifndef CPUM_CAN_USE_FPU_IN_R0
    267270        ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
    268271        ; in cpumRZSaveHostFPUState.
    269272        mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
    270         RESTORE_CR0 xCX
    271 %endif
     273        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET xCX
    272274        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
    273275
     
    291293; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
    292294;
    293 align 16
     295ALIGNCODE(16)
    294296BEGINPROC cpumR0RestoreHostFPUState
    295297        ;
     
    312314        CPUMR0_LOAD_HOST
    313315
    314 %ifndef CPUM_CAN_USE_FPU_IN_R0
    315316        ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
    316317        ; in cpumRZSaveHostFPUState.
    317318        ;; @todo What about XCR0?
    318319        mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
    319         RESTORE_CR0 xCX
    320 %endif
     320        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET xCX
     321
    321322        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_HOST
    322323        popf
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r60847 r61348  
    11261126#endif
    11271127
     1128#ifdef VMM_R0_TOUCH_FPU
     1129                /*
     1130                 * Make sure we've got the FPU state loaded so we don't need to clear
     1131                 * CR0.TS and get out of sync with the host kernel when loading the guest
     1132                 * FPU state.  @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
     1133                 */
     1134                CPUMR0TouchHostFpu();
     1135#endif
    11281136                int  rc;
    11291137                bool fPreemptRestored = false;
  • trunk/src/VBox/VMM/VMMRZ/CPUMRZA.asm

    r61317 r61348  
    7272        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
    7373
    74 %ifndef CPUM_CAN_USE_FPU_IN_R0
    75         ;
    76         ; In raw-mode context and on systems where the kernel doesn't necessarily
    77         ; allow us to use the FPU in ring-0 context, we have to disable FPU traps
    78         ; before doing fxsave/xsave here.  (xCX is 0 if no CR0 was necessary.)  We
    79         ; leave it like that so IEM can use the FPU/SSE/AVX host CPU features directly.
    80         ;
    81         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX               ; xCX must be preserved!
    82         ;; @todo What about XCR0?
    83  %ifdef IN_RING0
    84         mov     [pCpumCpu + CPUMCPU.Host.cr0Fpu], xCX
    85  %endif
    86 %endif
     74        ;
     75        ; We may have to update CR0, indirectly or directly.  We must report any
     76        ; changes to the VT-x code.
     77        ;
     78        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC xCX, xAX, pCpumCpu ; xCX is the return value (xAX scratch)
     79
    8780        ;
    8881        ; Save the host state (xsave/fxsave will cause thread FPU state to be
     
    9487        popf
    9588
    96 %ifndef CPUM_CAN_USE_FPU_IN_R0
    97         ; Figure the return code.
    98         test    ecx, ecx
    99         jnz     .modified_cr0
    100 %endif
    101         xor     eax, eax
    102 .return:
    103 
     89        mov     eax, ecx                ; The return value from above.
    10490%ifdef RT_ARCH_X86
    10591        pop     esi
     
    10894        leave
    10995        ret
    110 
    111 %ifndef CPUM_CAN_USE_FPU_IN_R0
    112 .modified_cr0:
    113         mov     eax, VINF_CPUM_HOST_CR0_MODIFIED
    114         jmp     .return
    115 %endif
    11696%undef pCpumCpu
    11797%undef pXState
     
    156136
    157137 %ifdef IN_RC
    158         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX must be preserved until CR0 is restored!
     138        mov     ecx, cr0                ; ecx = saved cr0
     139        test    ecx, X86_CR0_TS | X86_CR0_EM
     140        jz      .skip_cr0_write
     141        mov     eax, ecx
     142        and     eax, ~(X86_CR0_TS | X86_CR0_EM)
     143        mov     cr0, ecx
     144.skip_cr0_write:
    159145 %endif
    160146
     
    221207        test    byte [ebp + 0ch], 1     ; fLeaveFpuAccessible
    222208        jz      .no_cr0_restore
    223         RESTORE_CR0 xCX
     209        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET ecx
    224210.no_cr0_restore:
    225211 %endif
     
    272258 %ifdef IN_RC
    273259        ; Temporarily grant access to the SSE state. xDX must be preserved until CR0 is restored!
    274         SAVE_CR0_CLEAR_FPU_TRAPS xDX, xAX
     260        mov     edx, cr0
     261        jz      .skip_cr0_write
     262        mov     eax, edx
     263        and     eax, ~(X86_CR0_TS | X86_CR0_EM)
     264        mov     cr0, ecx
     265.skip_cr0_write:
    275266 %endif
    276267
     
    298289
    299290 %ifdef IN_RC
    300         RESTORE_CR0 xDX                 ; Restore CR0 if we changed it above.
     291        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET edx  ; Restore CR0 if we changed it above.
    301292 %endif
    302293
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r61317 r61348  
    3232 %define CPUM_IS_AMD64      0
    3333%endif
    34 
    35 ;; @def CPUM_CAN_USE_FPU_IN_R0
    36 ; Indicates that we can use the FPU directly in ring-0.
    37 ; Only defined in ring-0.
    38 %ifdef VBOX_WITH_KERNEL_USING_XMM
    39  ; Systems using XMM registers as part of their kernel calling convention must
    40  ; support saving and restoring the state while in ring-0.  64-bit Windows will
    41  ; always switch the FPU state when context switching.
    42  %define CPUM_CAN_USE_FPU_IN_R0 1
    43 %endif
    44 %ifdef RT_OS_WINDOWS
    45  ; 32-bit Windows will load the FPU context of the current thread (user land).
    46  %define CPUM_CAN_USE_FPU_IN_R0 1
    47 %endif
    48 %ifdef RT_OS_DARWIN
    49  ; Intel Darwin kernels will load the FPU context of the current thread (user land).
    50  ;; @todo we still need to check CR0 and tell HMVMX when CR0 changes!
    51  ;%define CPUM_CAN_USE_FPU_IN_R0 1
    52 %endif
    53 %ifdef RT_OS_LINUX
    54  ; Intel Linux kernels will load the FPU context of the current thread (user land),
    5655  ; at least that's what my LXR research on 2.6.18+ indicates.  It's possible this was
    5756  ; done differently at some point; I seem to recall issues with it ages and ages ago.
    57  ;; @todo We still need to check CR0 and tell HMVMX when CR0 changes!
    58  ;%define CPUM_CAN_USE_FPU_IN_R0 1
    59 %endif
    60 %ifndef IN_RING0
    61  %undef CPUM_CAN_USE_FPU_IN_R0
    62 %endif
    63 
    6434
    6535
     
    588558
    589559;;
    590 ; Clears CR0.TS and CR0.EM if necessary, saving the previous result.
    591 ;
    592 ; This is used to avoid FPU exceptions when touching the FPU state.
    593 ;
    594 ; @param    %1      Register to save the old CR0 in (pass to RESTORE_CR0).
    595 ; @param    %2      Temporary scratch register.
    596 ; @uses     EFLAGS, CR0
    597 ;
    598 %macro SAVE_CR0_CLEAR_FPU_TRAPS 2
    599         xor     %1, %1
     560; Makes sure we don't trap (#NM) accessing the FPU.
     561;
     562; In ring-0 this is a bit of work since we may have to try to convince the host
     563; kernel to do the work for us; also, we must report any CR0 changes back to HMR0VMX
     564; via the VINF_CPUM_HOST_CR0_MODIFIED status code.
     565;
     566; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
     567; value in CPUMCPU.Host.cr0Fpu.  If we don't, we'll store zero there.  (See also
     568; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
     569;
     570; In raw-mode we will always have to clear TS and it will be recalculated
     571; elsewhere and thus needs no saving.
     572;
     573; @param    %1          Register to return the return status code in.
     574; @param    %2          Temporary scratch register.
     575; @param    %3          Ring-0 only, register pointing to the CPUMCPU structure
     576;                       of the EMT we're on.
     577; @uses     EFLAGS, CR0, %1, %2
     578;
     579%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
     580 %ifdef IN_RC
     581        ;
     582        ; raw-mode - always clear it.  We won't be here otherwise.
     583        ;
     584        mov     %2, cr0
     585        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
     586        mov     cr0, %2
     587
     588 %else
     589        ;
     590        ; ring-0 - slightly complicated.
     591        ;
     592        xor     %1, %1                          ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
     593        mov     [%3 + CPUMCPU.Host.cr0Fpu], %1
     594
    600595        mov     %2, cr0
    601596        test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure its safe to access the FPU state.
    602         jz      %%skip_cr0_write
    603         mov     %1, %2                  ; Save old CR0
     597        jz      %%no_cr0_change
     598
     599  %ifdef VMM_R0_TOUCH_FPU
     600        ; Touch the state and check that the kernel updated CR0 for us.
     601        movdqa  xmm0, xmm0
     602        mov     %2, cr0
     603        test    %2, X86_CR0_TS | X86_CR0_EM
     604        jz      %%cr0_changed
     605  %endif
     606
     607        ; Save CR0 and clear them flags ourselves.
     608        mov     [%3 + CPUMCPU.Host.cr0Fpu], %2
    604609        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
    605610        mov     cr0, %2
    606 %%skip_cr0_write:
     611 %endif ; IN_RING0
     612
     613%%cr0_changed:
     614        mov     %1,  VINF_CPUM_HOST_CR0_MODIFIED
     615%%no_cr0_change:
    607616%endmacro
    608617
    609 ;;
    610 ; Restore CR0.TS and CR0.EM state if SAVE_CR0_CLEAR_FPU_TRAPS change it.
    611 ;
    612 ; @param    %1      The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in.
    613 ;
    614 %macro RESTORE_CR0 1
    615         cmp     %1, 0
    616         je      %%skip_cr0_restore
     618
     619;;
     620; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
     621;
     622; @param    %1      The original state to restore (or zero).
     623;
     624%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
     625        test    %1, X86_CR0_TS | X86_CR0_EM
     626        jz      %%skip_cr0_restore
    617627        mov     cr0, %1
    618628%%skip_cr0_restore:
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette