Changeset 66878 in vbox for trunk/src/VBox/VMM/VMMRZ
- Timestamp: May 12, 2017 12:40:17 PM (8 years ago)
- svn:sync-xref-src-repo-rev: 115383
- Location: trunk/src/VBox/VMM/VMMRZ
- Files: 2 edited
trunk/src/VBox/VMM/VMMRZ/CPUMRZ.cpp
(r61392 → r66878)

     /**
    - * Makes sure the XMM0..XMM15 state in CPUMCPU::Guest is up to date.
    + * Makes sure the XMM0..XMM15 and MXCSR state in CPUMCPU::Guest is up to date.
      *
      * This will not cause CPUM_USED_FPU_GUEST to change.
    …
     }

    +
    +/**
    + * Makes sure the YMM0..YMM15 and MXCSR state in CPUMCPU::Guest is up to date.
    + *
    + * This will not cause CPUM_USED_FPU_GUEST to change.
    + *
    + * @param   pVCpu   The cross context virtual CPU structure.
    + */
    +VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPU pVCpu)
    +{
    +    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
    +    {
    +#if defined(IN_RING0) && ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    +        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
    +        {
    +            Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
    +            HMR0SaveFPUState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
    +            pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU_GUEST;
    +        }
    +        else
    +#endif
    +            cpumRZSaveGuestAvxRegisters(&pVCpu->cpum.s);
    +        Log7(("CPUMRZFpuStateActualizeAvxForRead\n"));
    +    }
    +}
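For context on the CPUMRZFpuStateActualizeAvxForRead addition above: it follows the same lazy-flush pattern as the existing SSE variant, where CPUM_USED_FPU_GUEST marks that the CPU currently holds the guest's register state, and the actualize call copies that state into CPUMCPU::Guest without clearing the flag. The standalone C sketch below only illustrates that pattern; TOYCPU, toySaveHwState and toyActualizeForRead are made-up names for illustration, not VBox APIs.

#include <stdint.h>
#include <string.h>

/* Toy model of the "actualize for read" pattern: a flag records that the
   hardware registers hold a fresher copy of the guest state than memory. */
#define TOY_USED_FPU_GUEST  UINT32_C(0x0001)

typedef struct TOYCPU
{
    uint32_t fUseFlags;         /* stand-in for CPUMCPU::fUseFlags */
    uint8_t  abXState[64];      /* stand-in for the guest XSAVE area */
} TOYCPU;

/* Stand-in for cpumRZSaveGuestAvxRegisters: snapshot the live registers. */
static void toySaveHwState(uint8_t *pbDst)
{
    memset(pbDst, 0xa5, 64);    /* pretend these are the live YMM/MXCSR values */
}

/* Flush hardware -> memory so the in-memory copy can be read, but leave the
   "used" flag set, mirroring how CPUM_USED_FPU_GUEST is left unchanged. */
static void toyActualizeForRead(TOYCPU *pCpu)
{
    if (pCpu->fUseFlags & TOY_USED_FPU_GUEST)
        toySaveHwState(pCpu->abXState);
}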
trunk/src/VBox/VMM/VMMRZ/CPUMRZA.asm
(r61368 → r66878)

 ; @param    fLeaveFpuAccessible  x86:[ebp+c] gcc:sil msc:dl   Whether to restore CR0 and XCR0 on
 ;                                the way out. Only really applicable to RC.
+;
+; @remarks  64-bit Windows drivers shouldn't use AVX registers without saving+loading:
+;               https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
+;           However the compiler docs have different idea:
+;               https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
+;           We'll go with the former for now.
 ;
 align 16
…
         movdqa  [pXState + X86FXSTATE.xmm14], xmm14
         movdqa  [pXState + X86FXSTATE.xmm15], xmm15
+        stmxcsr [pXState + X86FXSTATE.MXCSR]

         ; Load the guest XMM register values we already saved in HMR0VMXStartVMWrapXMM.
…
         movdqa  xmm14, [pXState + X86FXSTATE.xmm14]
         movdqa  xmm15, [pXState + X86FXSTATE.xmm15]
+        ldmxcsr [pXState + X86FXSTATE.MXCSR]

         CPUMR0_SAVE_GUEST
…
         movdqa  xmm14, [pXState + X86FXSTATE.xmm14]
         movdqa  xmm15, [pXState + X86FXSTATE.xmm15]
+        ldmxcsr [pXState + X86FXSTATE.MXCSR]

 %endif
…

 ;;
-; Saves the guest XMM0..15 registers.
+; Saves the guest XMM0..15 registers and MXCSR.
 ;
 ; The purpose is to actualize the register state for read-only use, so CR0 is
…
         ; Do the job.
         ;
+        stmxcsr [xCX + X86FXSTATE.MXCSR]
         movdqa  [xCX + X86FXSTATE.xmm0 ], xmm0
         movdqa  [xCX + X86FXSTATE.xmm1 ], xmm1
…
 ENDPROC cpumRZSaveGuestSseRegisters

+;;
+; Saves the guest YMM0..15 registers.
+;
+; The purpose is to actualize the register state for read-only use, so CR0 is
+; restored in raw-mode context (so, the FPU/SSE/AVX CPU features can be
+; inaccessible upon return).
+;
+; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
+;
+align 16
+BEGINPROC cpumRZSaveGuestAvxRegisters
+        push    xBP
+        SEH64_PUSH_xBP
+        mov     xBP, xSP
+        SEH64_SET_FRAME_xBP 0
+%ifdef IN_RC
+        push    xBX
+%endif
+SEH64_END_PROLOGUE
+
+        ;
+        ; Load xCX with the guest pXStateR0.
+        ;
+%ifdef ASM_CALL64_GCC
+        mov     xCX, rdi
+%elifdef RT_ARCH_X86
+        mov     xCX, dword [ebp + 8]
+%endif
+%ifdef IN_RING0
+        mov     xCX, [xCX + CPUMCPU.Guest.pXStateR0]
+%elifdef IN_RC
+        mov     xCX, [xCX + CPUMCPU.Guest.pXStateRC]
+%else
+ %error "Invalid context!"
+%endif
+
+%ifdef IN_RC
+        ; Temporarily grant access to the SSE state. xBX must be preserved until CR0 is restored!
+        mov     ebx, cr0
+        test    ebx, X86_CR0_TS | X86_CR0_EM
+        jz      .skip_cr0_write
+        mov     eax, ebx
+        and     eax, ~(X86_CR0_TS | X86_CR0_EM)
+        mov     cr0, eax
+.skip_cr0_write:
+%endif
+
+        ;
+        ; Use XSAVE to do the job.
+        ;
+        ; Drivers shouldn't use AVX registers without saving+loading:
+        ;     https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
+        ;     However the compiler docs have different idea:
+        ;     https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
+        ;     We'll go with the former for now.
+        ;
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        mov     eax, XSAVE_C_YMM
+%else
+        mov     eax, XSAVE_C_YMM | XSAVE_C_SSE ; The SSE component includes MXCSR.
+%endif
+        xor     edx, edx
+%if ARCH_BITS == 64
+        o64 xsave [xCX]
+%else
+        xsave   [xCX]
+%endif
+
+%ifdef IN_RC
+        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET ebx ; Restore CR0 if we changed it above.
+        pop     xBX
+%endif
+        leave
+        ret
+ENDPROC cpumRZSaveGuestAvxRegisters
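A note on the XSAVE usage in cpumRZSaveGuestAvxRegisters: the instruction takes a 64-bit requested-feature bitmap in EDX:EAX, and the routine requests the YMM component (plus the SSE component, which carries MXCSR, unless VBOX_WITH_KERNEL_USING_XMM means that part is handled elsewhere). The C sketch below merely restates the architectural component bits and the EDX:EAX split; the MY_XSAVE_C_* names are illustrative and not the VBox definitions.

#include <stdint.h>

/* Architectural XSAVE state-component bits (Intel SDM vol. 1, ch. 13). */
#define MY_XSAVE_C_X87  UINT64_C(0x0000000000000001)  /* x87 FPU state */
#define MY_XSAVE_C_SSE  UINT64_C(0x0000000000000002)  /* XMM0..15 and MXCSR */
#define MY_XSAVE_C_YMM  UINT64_C(0x0000000000000004)  /* upper halves of YMM0..15 */

/* XSAVE takes the component mask split across EDX:EAX; the assembly above
   loads the low bits into EAX and zeroes EDX because only bits 1 and 2 are
   ever requested here. */
static inline void mySplitXsaveMask(uint64_t fComponents, uint32_t *pEax, uint32_t *pEdx)
{
    *pEax = (uint32_t)fComponents;
    *pEdx = (uint32_t)(fComponents >> 32);
}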