Changeset 16108 in vbox
Timestamp: Jan 21, 2009 12:16:27 AM (16 years ago)
Location: trunk/src/VBox/VMM
Files: 4 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/CPUMInternal.h
r15962 r16108 390 390 391 391 #ifdef IN_RING0 392 DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM); 392 393 DECLASM(int) cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM); 393 394 DECLASM(int) cpumR0RestoreHostFPUState(PCPUMCPU pCPUM); -
trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm
r15416 r16108 156 156 %endif 157 157 %ifndef RT_ARCH_AMD64 158 mov eax, edx 159 ; Calculate the PCPUM pointer 158 mov eax, edx ; Calculate the PCPUM pointer 160 159 sub eax, [edx + CPUMCPU.ulOffCPUM] 161 160 test dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR … … 179 178 fnsave [xDX + CPUMCPU.Host.fpu] 180 179 or dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm 181 mov eax, [xDX + CPUMCPU.Guest.fpu] ; control word180 mov eax, [xDX + CPUMCPU.Guest.fpu] ; control word 182 181 not eax ; 1 means exception ignored (6 LS bits) 183 182 and eax, byte 03Fh ; 6 LS bits only 184 test eax, [xDX + CPUMCPU.Guest.fpu + 4] ; status word183 test eax, [xDX + CPUMCPU.Guest.fpu + 4] ; status word 185 184 jz short hlfpua_no_exceptions_pending 186 185 ; technically incorrect, but we certainly don't want any exceptions now!! -
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r16106 r16108 171 171 if (CPUMIsGuestInLongModeEx(pCtx)) 172 172 { 173 /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */174 pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE ;173 /* Save/Restore the state on entry as we need to be in 64 bits mode to access the full state. */ 174 pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE | CPUM_USED_FPU; 175 175 } 176 176 else … … 178 178 { 179 179 #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE 180 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 2.1. */ 181 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */ 182 uint64_t SavedEFER = 0; 183 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR) 184 { 185 SavedEFER = ASMRdMsr(MSR_K6_EFER); 186 if (SavedEFER & MSR_K6_EFER_FFXSR) 187 { 188 ASMWrMsr(MSR_K6_EFER, SavedEFER & ~MSR_K6_EFER_FFXSR); 189 pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE; 190 } 191 } 192 193 /* Do the job and record that we've switched FPU state. */ 194 cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s); 195 pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU; 196 197 /* Restore EFER. */ 198 if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE) 199 ASMWrMsr(MSR_K6_EFER, SavedEFER); 200 201 # else 180 202 uint64_t oldMsrEFERHost = 0; 181 203 uint32_t oldCR0 = ASMGetCR0(); … … 204 226 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost); 205 227 206 228 /* CPUMHandleLazyFPU could have changed CR0; restore it. */ 207 229 ASMSetCR0(oldCR0); 230 # endif 208 231 209 232 #else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */ … … 236 259 } 237 260 } 261 238 262 #endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */ 239 263 } 240 264 241 pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU; 265 pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU; /** @todo clean up, this is done above by the ASM worker. */ 242 266 return VINF_SUCCESS; 243 267 } -
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm
r15417 r16108 66 66 67 67 ;; 68 ; Restores the host's FPU/XMM state68 ; Saves the guest FPU/XMM state and restores the host one. 69 69 ; 70 70 ; @returns 0 … … 72 72 ; 73 73 align 16 74 BEGINPROC cpumR0SaveHostRestoreGuestFPUState 75 %ifdef RT_ARCH_AMD64 76 %ifdef RT_OS_WINDOWS 77 mov xDX, rcx 78 %else 79 mov xDX, rdi 80 %endif 81 %else 82 mov xDX, dword [esp + 4] 83 %endif 84 85 ; Switch the state. 86 or dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU 87 88 mov xAX, cr0 ; Make sure its safe to access the FPU state. 89 mov xCX, xAX ; save old CR0 90 and xAX, ~(X86_CR0_TS | X86_CR0_EM) 91 mov cr0, xAX ;; @todo optimize this. 92 93 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 94 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 95 jz .legacy_mode 96 db 0xea ; jmp far .sixtyfourbit_mode 97 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 98 .legacy_mode: 99 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL 100 101 fxsave [xDX + CPUMCPU.Host.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor 102 fxrstor [xDX + CPUMCPU.Guest.fpu] 103 104 .done: 105 mov cr0, xCX ; and restore old CR0 again ;; @todo optimize this. 106 .fpu_not_used: 107 xor eax, eax 108 ret 109 110 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 111 ALIGNCODE(16) 112 BITS 64 113 .sixtyfourbit_mode: 114 and edx, 0ffffffffh 115 fxsave [rdx + CPUMCPU.Host.fpu] 116 fxrstor [rdx + CPUMCPU.Guest.fpu] 117 jmp far [.fpret wrt rip] 118 .fpret: ; 16:32 Pointer to .the_end. 119 dd .done, NAME(SUPR0AbsKernelCS) 120 BITS 32 121 %endif 122 ENDPROC cpumR0SaveHostRestoreGuestFPUState 123 124 125 ;; 126 ; Saves the guest FPU/XMM state and restores the host one. 127 ; 128 ; @returns 0 129 ; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer 130 ; 131 align 16 74 132 BEGINPROC cpumR0SaveGuestRestoreHostFPUState 75 133 %ifdef RT_ARCH_AMD64 … … 83 141 %endif 84 142 85 ; Restore FPU if guest has used it.143 ; Only restore FPU if guest has used it. 86 144 ; Using fxrstor should ensure that we're not causing unwanted exception on the host. 
87 145 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU 88 146 jz short .fpu_not_used 89 147 90 mov xAX, cr0 148 mov xAX, cr0 ; Make sure it's safe to access the FPU state. 91 149 mov xCX, xAX ; save old CR0 92 150 and xAX, ~(X86_CR0_TS | X86_CR0_EM) 93 mov cr0, xAX 94 95 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 96 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 97 jz .legacy_mode 98 db 0xea ; jmp far .sixtyfourbit_mode 99 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 100 .legacy_mode: 101 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL 102 103 fxsave [xDX + CPUMCPU.Guest.fpu] 151 mov cr0, xAX ;; @todo optimize this. 152 153 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 154 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 155 jz .legacy_mode 156 db 0xea ; jmp far .sixtyfourbit_mode 157 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 158 .legacy_mode: 159 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL 160 161 fxsave [xDX + CPUMCPU.Guest.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor 104 162 fxrstor [xDX + CPUMCPU.Host.fpu] 105 163 106 164 .done: 107 mov cr0, xCX ; and restore old CR0 again 165 mov cr0, xCX ; and restore old CR0 again ;; @todo optimize this. 108 166 and dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU 109 167 .fpu_not_used: … … 124 182 %endif 125 183 ENDPROC cpumR0SaveGuestRestoreHostFPUState 184 126 185 127 186 ;;
Note: See TracChangeset for help on using the changeset viewer.