VirtualBox

Changeset 54898 in vbox for trunk/src/VBox/VMM/VMMR0


Ignore:
Timestamp:
Mar 22, 2015 11:47:07 PM (10 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
99113
Message:

CPUMCTX,CPUMHOST: Replaced the fpu (X86FXSAVE) member with an XState (X86XSAVEAREA) member.

Location:
trunk/src/VBox/VMM/VMMR0
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r54862 r54898  
    488488         *        We could just do all this in assembly. */
    489489        uint128_t aGuestXmmRegs[16];
    490         memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
     490        memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.XState.x87.aXMM[0], sizeof(aGuestXmmRegs));
    491491#endif
    492492
     
    511511
    512512#ifdef VBOX_WITH_KERNEL_USING_XMM
    513         memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
     513        memcpy(&pVCpu->cpum.s.Guest.XState.x87.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
    514514#endif
    515515    }
  • trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm

    r54674 r54898  
    9090    ffree   st7              ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs
    9191                             ; for the upcoming push (load)
    92     fild    dword [xDX + CPUMCPU.Guest.fpu] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
     92    fild    dword [xDX + CPUMCPU.Guest.XState] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
    9393
    9494.nothing_to_clean:
     
    101101; @remarks Requires CPUMCPU pointer in RDX
    102102%macro SAVE_32_OR_64_FPU 0
    103     o64 fxsave  [rdx + CPUMCPU.Guest.fpu]
     103    o64 fxsave  [rdx + CPUMCPU.Guest.XState]
    104104
    105105    ; Shouldn't be necessary to check if the entire 64-bit FIP is 0 (i.e. guest hasn't used its FPU yet) because it should
    106106    ; be taken care of by the calling code, i.e. hmR0[Vmx|Svm]LoadSharedCR0() and hmR0[Vmx|Svm]ExitXcptNm() which ensure
    107107    ; we swap the guest FPU state when it starts using it (#NM). In any case it's only a performance optimization.
    108     ; cmp         qword [rdx + CPUMCPU.Guest.fpu + IP_OFF_IN_X86FXSTATE], 0
     108    ; cmp         qword [rdx + CPUMCPU.Guest.XState + IP_OFF_IN_X86FXSTATE], 0
    109109    ; je          short %%save_done
    110110
    111     cmp         dword [rdx + CPUMCPU.Guest.fpu + CS_OFF_IN_X86FXSTATE], 0
     111    cmp         dword [rdx + CPUMCPU.Guest.XState + CS_OFF_IN_X86FXSTATE], 0
    112112    jne         short %%save_done
    113113    sub         rsp, 20h                         ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0)
    114114    fnstenv     [rsp]
    115115    movzx       eax, word [rsp + 10h]
    116     mov         [rdx + CPUMCPU.Guest.fpu + CS_OFF_IN_X86FXSTATE], eax
     116    mov         [rdx + CPUMCPU.Guest.XState + CS_OFF_IN_X86FXSTATE], eax
    117117    movzx       eax, word [rsp + 18h]
    118     mov         [rdx + CPUMCPU.Guest.fpu + DS_OFF_IN_X86FXSTATE], eax
     118    mov         [rdx + CPUMCPU.Guest.XState + DS_OFF_IN_X86FXSTATE], eax
    119119    add         rsp, 20h
    120     mov         dword [rdx + CPUMCPU.Guest.fpu + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
     120    mov         dword [rdx + CPUMCPU.Guest.XState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
    121121%%save_done:
    122122%endmacro
     
    127127; @remarks Requires CPUMCPU pointer in RDX
    128128%macro RESTORE_32_OR_64_FPU 0
    129     cmp         dword [rdx + CPUMCPU.Guest.fpu + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
     129    cmp         dword [rdx + CPUMCPU.Guest.XState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
    130130    jne         short %%restore_64bit_fpu
    131     fxrstor     [rdx + CPUMCPU.Guest.fpu]
     131    fxrstor     [rdx + CPUMCPU.Guest.XState]
    132132    jmp         short %%restore_fpu_done
    133133%%restore_64bit_fpu:
    134     o64 fxrstor [rdx + CPUMCPU.Guest.fpu]
     134    o64 fxrstor [rdx + CPUMCPU.Guest.XState]
    135135%%restore_fpu_done:
    136136%endmacro
     
    201201%ifdef RT_ARCH_AMD64
    202202    ; Use explicit REX prefix. See @bugref{6398}.
    203     o64 fxsave  [rdx + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
     203    o64 fxsave  [rdx + CPUMCPU.Host.XState] ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
    204204
    205205    ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
    206206    test    dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    207207    jnz     short .fpu_load_32_or_64
    208     fxrstor [rdx + CPUMCPU.Guest.fpu]
     208    fxrstor [rdx + CPUMCPU.Guest.XState]
    209209    jmp     short .fpu_load_done
    210210.fpu_load_32_or_64:
     
    212212.fpu_load_done:
    213213%else
    214     fxsave  [edx + CPUMCPU.Host.fpu]        ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
    215     fxrstor [edx + CPUMCPU.Guest.fpu]
     214    fxsave  [edx + CPUMCPU.Host.XState]     ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
     215    fxrstor [edx + CPUMCPU.Guest.XState]
    216216%endif
    217217
    218218%ifdef VBOX_WITH_KERNEL_USING_XMM
    219219    ; Restore the non-volatile xmm registers. ASSUMING 64-bit windows
    220     lea     r11, [xDX + CPUMCPU.Host.fpu + XMM_OFF_IN_X86FXSTATE]
     220    lea     r11, [xDX + CPUMCPU.Host.XState + XMM_OFF_IN_X86FXSTATE]
    221221    movdqa  xmm6,  [r11 + 060h]
    222222    movdqa  xmm7,  [r11 + 070h]
     
    243243.sixtyfourbit_mode:
    244244    and     edx, 0ffffffffh
    245     o64 fxsave  [rdx + CPUMCPU.Host.fpu]
     245    o64 fxsave  [rdx + CPUMCPU.Host.XState]
    246246
    247247    ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
    248248    test    dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    249249    jnz     short .fpu_load_32_or_64_darwin
    250     fxrstor [rdx + CPUMCPU.Guest.fpu]
     250    fxrstor [rdx + CPUMCPU.Guest.XState]
    251251    jmp     short .fpu_load_done_darwin
    252252.fpu_load_32_or_64_darwin:
     
    284284    ; Do NOT use xCX from this point!
    285285
    286     fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
     286    fxsave  [xDX + CPUMCPU.Host.XState] ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
    287287
    288288    ; Restore CR0 from xCX if it was saved previously.
     
    340340    test    dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    341341    jnz     short .fpu_save_32_or_64
    342     fxsave  [rdx + CPUMCPU.Guest.fpu]
     342    fxsave  [rdx + CPUMCPU.Guest.XState]
    343343    jmp     short .fpu_save_done
    344344.fpu_save_32_or_64:
     
    347347
    348348    ; Use explicit REX prefix. See @bugref{6398}.
    349     o64 fxrstor [rdx + CPUMCPU.Host.fpu]
    350 %else
    351     fxsave  [edx + CPUMCPU.Guest.fpu]       ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
    352     fxrstor [edx + CPUMCPU.Host.fpu]
     349    o64 fxrstor [rdx + CPUMCPU.Host.XState]
     350%else
     351    fxsave  [edx + CPUMCPU.Guest.XState]    ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
     352    fxrstor [edx + CPUMCPU.Host.XState]
    353353%endif
    354354
     
    371371    test    dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    372372    jnz     short .fpu_save_32_or_64_darwin
    373     fxsave  [rdx + CPUMCPU.Guest.fpu]
     373    fxsave  [rdx + CPUMCPU.Guest.XState]
    374374    jmp     short .fpu_save_done_darwin
    375375.fpu_save_32_or_64_darwin:
     
    377377.fpu_save_done_darwin:
    378378
    379     o64 fxrstor [rdx + CPUMCPU.Host.fpu]
     379    o64 fxrstor [rdx + CPUMCPU.Host.XState]
    380380    jmp far [.fpret wrt rip]
    381381.fpret:                                 ; 16:32 Pointer to .the_end.
     
    425425
    426426%ifdef RT_ARCH_AMD64
    427     o64 fxrstor [xDX + CPUMCPU.Host.fpu]
    428 %else
    429     fxrstor [xDX + CPUMCPU.Host.fpu]
     427    o64 fxrstor [xDX + CPUMCPU.Host.XState]
     428%else
     429    fxrstor [xDX + CPUMCPU.Host.XState]
    430430%endif
    431431
     
    444444.sixtyfourbit_mode:
    445445    and     edx, 0ffffffffh
    446     o64 fxrstor [rdx + CPUMCPU.Host.fpu]
     446    o64 fxrstor [rdx + CPUMCPU.Host.XState]
    447447    jmp far [.fpret wrt rip]
    448448.fpret:                                 ; 16:32 Pointer to .the_end.
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r54712 r54898  
    19761976        "FPUDP=%04x DS=%04x Rsvrd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n"
    19771977        ,
    1978         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW,
    1979         pCtx->fpu.FOP, pCtx->fpu.FPUIP, pCtx->fpu.CS, pCtx->fpu.Rsrvd1,
    1980         pCtx->fpu.FPUDP, pCtx->fpu.DS, pCtx->fpu.Rsrvd2,
    1981         pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK));
     1978        pCtx->XState.x87.FCW, pCtx->XState.x87.FSW, pCtx->XState.x87.FTW,
     1979        pCtx->XState.x87.FOP, pCtx->XState.x87.FPUIP, pCtx->XState.x87.CS, pCtx->XState.x87.Rsrvd1,
     1980        pCtx->XState.x87.FPUDP, pCtx->XState.x87.DS, pCtx->XState.x87.Rsrvd2,
     1981        pCtx->XState.x87.MXCSR, pCtx->XState.x87.MXCSR_MASK));
    19821982
    19831983    Log(("MSR:\n"
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette