Changeset 54898 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp:
- Mar 22, 2015 11:47:07 PM (10 years ago)
- svn:sync-xref-src-repo-rev:
- 99113
- Location:
- trunk/src/VBox/VMM/VMMR0
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp (r54862 → r54898)

    @@ lines 488–492 @@
             * We could just all this in assembly. */
            uint128_t aGuestXmmRegs[16];
    -       memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
    +       memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.XState.x87.aXMM[0], sizeof(aGuestXmmRegs));
    #endif

    @@ lines 511–515 @@
    #ifdef VBOX_WITH_KERNEL_USING_XMM
    -       memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
    +       memcpy(&pVCpu->cpum.s.Guest.XState.x87.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
    #endif
    }
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm (r54674 → r54898)

    @@ lines 90–94 @@
        ffree st7                               ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs
                                                ; for the upcoming push (load)
    -   fild dword [xDX + CPUMCPU.Guest.fpu]    ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
    +   fild dword [xDX + CPUMCPU.Guest.XState] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.

    .nothing_to_clean:

    @@ lines 101–122 @@
    ; @remarks Requires CPUMCPU pointer in RDX
    %macro SAVE_32_OR_64_FPU 0
    -   o64 fxsave [rdx + CPUMCPU.Guest.fpu]
    +   o64 fxsave [rdx + CPUMCPU.Guest.XState]

        ; Shouldn't be necessary to check if the entire 64-bit FIP is 0 (i.e. guest hasn't used its FPU yet) because it should
        ; be taken care of by the calling code, i.e. hmR0[Vmx|Svm]LoadSharedCR0() and hmR0[Vmx|Svm]ExitXcptNm() which ensure
        ; we swap the guest FPU state when it starts using it (#NM). In any case it's only a performance optimization.
    -   ; cmp qword [rdx + CPUMCPU.Guest.fpu + IP_OFF_IN_X86FXSTATE], 0
    +   ; cmp qword [rdx + CPUMCPU.Guest.XState + IP_OFF_IN_X86FXSTATE], 0
        ; je short %%save_done

    -   cmp dword [rdx + CPUMCPU.Guest.fpu + CS_OFF_IN_X86FXSTATE], 0
    +   cmp dword [rdx + CPUMCPU.Guest.XState + CS_OFF_IN_X86FXSTATE], 0
        jne short %%save_done
        sub rsp, 20h                            ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0)
        fnstenv [rsp]
        movzx eax, word [rsp + 10h]
    -   mov [rdx + CPUMCPU.Guest.fpu + CS_OFF_IN_X86FXSTATE], eax
    +   mov [rdx + CPUMCPU.Guest.XState + CS_OFF_IN_X86FXSTATE], eax
        movzx eax, word [rsp + 18h]
    -   mov [rdx + CPUMCPU.Guest.fpu + DS_OFF_IN_X86FXSTATE], eax
    +   mov [rdx + CPUMCPU.Guest.XState + DS_OFF_IN_X86FXSTATE], eax
        add rsp, 20h
    -   mov dword [rdx + CPUMCPU.Guest.fpu + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
    +   mov dword [rdx + CPUMCPU.Guest.XState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
    %%save_done:
    %endmacro

    @@ lines 127–136 @@
    ; @remarks Requires CPUMCPU pointer in RDX
    %macro RESTORE_32_OR_64_FPU 0
    -   cmp dword [rdx + CPUMCPU.Guest.fpu + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
    +   cmp dword [rdx + CPUMCPU.Guest.XState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
        jne short %%restore_64bit_fpu
    -   fxrstor [rdx + CPUMCPU.Guest.fpu]
    +   fxrstor [rdx + CPUMCPU.Guest.XState]
        jmp short %%restore_fpu_done
    %%restore_64bit_fpu:
    -   o64 fxrstor [rdx + CPUMCPU.Guest.fpu]
    +   o64 fxrstor [rdx + CPUMCPU.Guest.XState]
    %%restore_fpu_done:
    %endmacro

    @@ lines 201–222 @@
    %ifdef RT_ARCH_AMD64
        ; Use explicit REX prefix. See @bugref{6398}.
    -   o64 fxsave [rdx + CPUMCPU.Host.fpu]     ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)
    +   o64 fxsave [rdx + CPUMCPU.Host.XState]  ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)

        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz short .fpu_load_32_or_64
    -   fxrstor [rdx + CPUMCPU.Guest.fpu]
    +   fxrstor [rdx + CPUMCPU.Guest.XState]
        jmp short .fpu_load_done
    .fpu_load_32_or_64:
        …
    .fpu_load_done:
    %else
    -   fxsave [edx + CPUMCPU.Host.fpu]         ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)
    -   fxrstor [edx + CPUMCPU.Guest.fpu]
    +   fxsave [edx + CPUMCPU.Host.XState]      ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)
    +   fxrstor [edx + CPUMCPU.Guest.XState]
    %endif

    %ifdef VBOX_WITH_KERNEL_USING_XMM
        ; Restore the non-volatile xmm registers. ASSUMING 64-bit windows
    -   lea r11, [xDX + CPUMCPU.Host.fpu + XMM_OFF_IN_X86FXSTATE]
    +   lea r11, [xDX + CPUMCPU.Host.XState + XMM_OFF_IN_X86FXSTATE]
        movdqa xmm6, [r11 + 060h]
        movdqa xmm7, [r11 + 070h]

    @@ lines 243–252 @@
    .sixtyfourbit_mode:
        and edx, 0ffffffffh
    -   o64 fxsave [rdx + CPUMCPU.Host.fpu]
    +   o64 fxsave [rdx + CPUMCPU.Host.XState]

        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz short .fpu_load_32_or_64_darwin
    -   fxrstor [rdx + CPUMCPU.Guest.fpu]
    +   fxrstor [rdx + CPUMCPU.Guest.XState]
        jmp short .fpu_load_done_darwin
    .fpu_load_32_or_64_darwin:

    @@ lines 284–288 @@
        ; Do NOT use xCX from this point!

    -   fxsave [xDX + CPUMCPU.Host.fpu]         ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
    +   fxsave [xDX + CPUMCPU.Host.XState]      ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)

        ; Restore CR0 from xCX if it was saved previously.

    @@ lines 340–353 @@
        test dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz short .fpu_save_32_or_64
    -   fxsave [rdx + CPUMCPU.Guest.fpu]
    +   fxsave [rdx + CPUMCPU.Guest.XState]
        jmp short .fpu_save_done
    .fpu_save_32_or_64:
        …

        ; Use explicit REX prefix. See @bugref{6398}.
    -   o64 fxrstor [rdx + CPUMCPU.Host.fpu]
    +   o64 fxrstor [rdx + CPUMCPU.Host.XState]
    %else
    -   fxsave [edx + CPUMCPU.Guest.fpu]        ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
    -   fxrstor [edx + CPUMCPU.Host.fpu]
    +   fxsave [edx + CPUMCPU.Guest.XState]     ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
    +   fxrstor [edx + CPUMCPU.Host.XState]
    %endif

    @@ lines 371–381 @@
        test dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz short .fpu_save_32_or_64_darwin
    -   fxsave [rdx + CPUMCPU.Guest.fpu]
    +   fxsave [rdx + CPUMCPU.Guest.XState]
        jmp short .fpu_save_done_darwin
    .fpu_save_32_or_64_darwin:
        …
    .fpu_save_done_darwin:

    -   o64 fxrstor [rdx + CPUMCPU.Host.fpu]
    +   o64 fxrstor [rdx + CPUMCPU.Host.XState]
        jmp far [.fpret wrt rip]
    .fpret:                                     ; 16:32 Pointer to .the_end.

    @@ lines 425–430 @@
    %ifdef RT_ARCH_AMD64
    -   o64 fxrstor [xDX + CPUMCPU.Host.fpu]
    +   o64 fxrstor [xDX + CPUMCPU.Host.XState]
    %else
    -   fxrstor [xDX + CPUMCPU.Host.fpu]
    +   fxrstor [xDX + CPUMCPU.Host.XState]
    %endif

    @@ lines 444–448 @@
    .sixtyfourbit_mode:
        and edx, 0ffffffffh
    -   o64 fxrstor [rdx + CPUMCPU.Host.fpu]
    +   o64 fxrstor [rdx + CPUMCPU.Host.XState]
        jmp far [.fpret wrt rip]
    .fpret:                                     ; 16:32 Pointer to .the_end.
trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r54712 → r54898)

    @@ lines 1976–1983 @@
             "FPUDP=%04x DS=%04x Rsvrd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n"
             ,
    -        pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW,
    -        pCtx->fpu.FOP, pCtx->fpu.FPUIP, pCtx->fpu.CS, pCtx->fpu.Rsrvd1,
    -        pCtx->fpu.FPUDP, pCtx->fpu.DS, pCtx->fpu.Rsrvd2,
    -        pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK));
    +        pCtx->XState.x87.FCW, pCtx->XState.x87.FSW, pCtx->XState.x87.FTW,
    +        pCtx->XState.x87.FOP, pCtx->XState.x87.FPUIP, pCtx->XState.x87.CS, pCtx->XState.x87.Rsrvd1,
    +        pCtx->XState.x87.FPUDP, pCtx->XState.x87.DS, pCtx->XState.x87.Rsrvd2,
    +        pCtx->XState.x87.MXCSR, pCtx->XState.x87.MXCSR_MASK));

        Log(("MSR:\n"
Note:
See TracChangeset
for help on using the changeset viewer.