Changeset 61348 in vbox for trunk/src/VBox/VMM
- Timestamp: May 31, 2016 5:59:34 PM
- svn:sync-xref-src-repo-rev: 107643
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/Makefile.kmk (r61339 → r61348)

   VMMR0_DEFS          += VMM_R0_SWITCH_STACK
  endif
+ if1of ($(KBUILD_TARGET), darwin linux win)
+  VMMR0_DEFS          += VMM_R0_TOUCH_FPU
+ endif
  VMMR0_DEFS.darwin     = VMM_R0_SWITCH_STACK
  VMMR0_DEFS.win.amd64  = VBOX_WITH_KERNEL_USING_XMM
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm (r61317 → r61348)

         SEH64_END_PROLOGUE
 
-%ifdef CPUM_CAN_USE_FPU_IN_R0
-        movaps  xmm0, xmm0
+%ifdef VMM_R0_TOUCH_FPU
+        movdqa  xmm0, xmm0              ; hope this is harmless.
 %endif
 
…
 
+%ifdef VMM_R0_TOUCH_FPU
+;;
+; Touches the host FPU state.
+;
+; @uses nothing (well, maybe cr0)
+;
+ALIGNCODE(16)
+BEGINPROC CPUMR0TouchHostFpu
+        push    xBP
+        SEH64_PUSH_xBP
+        mov     xBP, xSP
+        SEH64_SET_FRAME_xBP 0
+        SEH64_END_PROLOGUE
+
+        movdqa  xmm0, xmm0              ; Hope this is harmless.
+
+        leave
+        ret
+ENDPROC   CPUMR0TouchHostFpu
+%endif ; VMM_R0_TOUCH_FPU
+
+
 ;;
 ; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state.
…
 ; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
 ;
-align 16
+ALIGNCODE(16)
 BEGINPROC cpumR0SaveHostRestoreGuestFPUState
         push    xBP
…
         jnz     .already_saved_host
 
-%ifndef CPUM_CAN_USE_FPU_IN_R0
-        ; On systems where the kernel doesn't necessarily allow us to use the FPU
-        ; in ring-0 context, we have to disable FPU traps before doing fxsave/xsave
-        ; here.  (xCX is 0 if no CR0 was necessary.)  We leave it like that so IEM
-        ; can use the FPU/SSE/AVX host CPU features directly.
-        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX
-        mov     [pCpumCpu + CPUMCPU.Host.cr0Fpu], xCX
-        ;; @todo What about XCR0?
-%endif
+        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC xCX, xAX, pCpumCpu ; xCX is the return value for VT-x; xAX is scratch.
 
         CPUMR0_SAVE_HOST
…
         popf
 
-%ifndef CPUM_CAN_USE_FPU_IN_R0
-        test    ecx, ecx
-        jnz     .modified_cr0
-%endif
-        xor     eax, eax
+        mov     eax, ecx
 .return:
 %ifdef RT_ARCH_X86
…
         leave
         ret
-
-%ifndef CPUM_CAN_USE_FPU_IN_R0
-.modified_cr0:
-        mov     eax, VINF_CPUM_HOST_CR0_MODIFIED
-        jmp     .return
-%endif
 ENDPROC   cpumR0SaveHostRestoreGuestFPUState
 
…
 ; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
 ;
-align 16
+ALIGNCODE(16)
 BEGINPROC cpumR0SaveGuestRestoreHostFPUState
         push    xBP
…
         CPUMR0_LOAD_HOST
 
-%ifndef CPUM_CAN_USE_FPU_IN_R0
         ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
         ; in cpumRZSaveHostFPUState.
         mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
-        RESTORE_CR0 xCX
-%endif
+        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET xCX
         and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
 
…
 ; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
 ;
-align 16
+ALIGNCODE(16)
 BEGINPROC cpumR0RestoreHostFPUState
         ;
…
         CPUMR0_LOAD_HOST
 
-%ifndef CPUM_CAN_USE_FPU_IN_R0
         ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
         ; in cpumRZSaveHostFPUState.
         ;; @todo What about XCR0?
         mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
-        RESTORE_CR0 xCX
-%endif
+        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET xCX
 
         and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_HOST
         popf
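The `movdqa xmm0, xmm0` self-move is the heart of the change: on hosts with lazy FPU context switching, touching an SSE register from ring-0 raises #NM, which the kernel services by loading the current thread's FPU state and clearing CR0.TS, so the later fxsave/xsave no longer traps. A minimal C sketch of that idea follows; this is an illustration under stated assumptions (x86-64, ring-0), not VirtualBox source, and ReadCr0()/TouchHostFpu()/NeedManualCr0Clear() are stand-in names:

    /* Illustrative ring-0 sketch of the touch-the-FPU trick; not VirtualBox code. */
    #include <stdint.h>

    #define X86_CR0_EM  UINT64_C(0x00000004)   /* CR0.EM: FPU instructions trap.   */
    #define X86_CR0_TS  UINT64_C(0x00000008)   /* CR0.TS: FPU access raises #NM.   */

    static inline uint64_t ReadCr0(void)
    {
        uint64_t uCr0;
        __asm__ __volatile__("mov %%cr0, %0" : "=r" (uCr0));
        return uCr0;
    }

    /* Touch an XMM register; with lazy FPU switching the resulting #NM makes
     * the host kernel load this thread's FPU state and clear CR0.TS for us. */
    static inline void TouchHostFpu(void)
    {
        __asm__ __volatile__("movdqa %%xmm0, %%xmm0" ::: "xmm0");
    }

    /* Returns 0 if the FPU is accessible after the touch (kernel handled CR0),
     * non-zero if we would still have to clear CR0.TS/EM ourselves. */
    static int NeedManualCr0Clear(void)
    {
        TouchHostFpu();
        return (ReadCr0() & (X86_CR0_TS | X86_CR0_EM)) != 0;
    }

This is exactly the check the new CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC macro (see CPUMInternal.mac below) performs in assembly when VMM_R0_TOUCH_FPU is defined.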
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r60847 → r61348)

 #endif
 
+#ifdef VMM_R0_TOUCH_FPU
+            /*
+             * Make sure we've got the FPU state loaded so we don't need to clear
+             * CR0.TS and get out of sync with the host kernel when loading the
+             * guest FPU state.  @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
+             */
+            CPUMR0TouchHostFpu();
+#endif
             int  rc;
             bool fPreemptRestored = false;
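The C++ side needs only a trivial declaration for the new assembly helper defined in CPUMR0A.asm above. The changeset's visible diff does not show which header it went into, so the exact placement and decoration are an assumption; something along these lines is all that is required:

    /* Assumed shape of the declaration for the helper implemented in
     * CPUMR0A.asm; the header it lives in is not shown in this changeset. */
    VMMR0_INT_DECL(void) CPUMR0TouchHostFpu(void);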
trunk/src/VBox/VMM/VMMRZ/CPUMRZA.asm (r61317 → r61348)

         cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
 
-%ifndef CPUM_CAN_USE_FPU_IN_R0
-        ;
-        ; In raw-mode context and on systems where the kernel doesn't necessarily
-        ; allow us to use the FPU in ring-0 context, we have to disable FPU traps
-        ; before doing fxsave/xsave here.  (xCX is 0 if no CR0 was necessary.)  We
-        ; leave it like that so IEM can use the FPU/SSE/AVX host CPU features directly.
-        ;
-        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX must be preserved!
-        ;; @todo What about XCR0?
- %ifdef IN_RING0
-        mov     [pCpumCpu + CPUMCPU.Host.cr0Fpu], xCX
- %endif
-%endif
+        ;
+        ; We may have to update CR0, indirectly or directly.  We must report any
+        ; changes to the VT-x code.
+        ;
+        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC xCX, xAX, pCpumCpu ; xCX is the return value (xAX scratch)
 
         ;
         ; Save the host state (xsave/fxsave will cause thread FPU state to be
…
         popf
 
-%ifndef CPUM_CAN_USE_FPU_IN_R0
-        ; Figure the return code.
-        test    ecx, ecx
-        jnz     .modified_cr0
-%endif
-        xor     eax, eax
-.return:
-
+        mov     eax, ecx                ; The return value from above.
 %ifdef RT_ARCH_X86
         pop     esi
…
         leave
         ret
-
-%ifndef CPUM_CAN_USE_FPU_IN_R0
-.modified_cr0:
-        mov     eax, VINF_CPUM_HOST_CR0_MODIFIED
-        jmp     .return
-%endif
 %undef pCpumCpu
 %undef pXState
 
…
 
 %ifdef IN_RC
-        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX must be preserved until CR0 is restored!
+        mov     ecx, cr0                ; ecx = saved cr0
+        test    ecx, X86_CR0_TS | X86_CR0_EM
+        jz      .skip_cr0_write
+        mov     eax, ecx
+        and     eax, ~(X86_CR0_TS | X86_CR0_EM)
+        mov     cr0, eax
+.skip_cr0_write:
 %endif
 
…
         test    byte [ebp + 0ch], 1     ; fLeaveFpuAccessible
         jz      .no_cr0_restore
-        RESTORE_CR0 xCX
+        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET ecx
 .no_cr0_restore:
 %endif
…
 %ifdef IN_RC
         ; Temporarily grant access to the SSE state. xDX must be preserved until CR0 is restored!
-        SAVE_CR0_CLEAR_FPU_TRAPS xDX, xAX
+        mov     edx, cr0
+        test    edx, X86_CR0_TS | X86_CR0_EM
+        jz      .skip_cr0_write
+        mov     eax, edx
+        and     eax, ~(X86_CR0_TS | X86_CR0_EM)
+        mov     cr0, eax
+.skip_cr0_write:
 %endif
 
…
 
 %ifdef IN_RC
-        RESTORE_CR0 xDX                 ; Restore CR0 if we changed it above.
+        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET edx ; Restore CR0 if we changed it above.
 %endif
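The functional change in this file is the return-value contract: instead of a side channel guarded by CPUM_CAN_USE_FPU_IN_R0, the save/restore helpers now return whatever status code CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC left in ecx, so the VT-x code learns when its cached host CR0 went stale. A sketch of the caller side under stated assumptions; everything except cpumR0SaveHostRestoreGuestFPUState() and VINF_CPUM_HOST_CR0_MODIFIED (both in the diff above) is a hypothetical name used for illustration:

    /* Hypothetical call-site fragment, not a VirtualBox call site. */
    int rc = cpumR0SaveHostRestoreGuestFPUState(pCpumCpu);  /* 0 or VINF_CPUM_HOST_CR0_MODIFIED */
    if (rc == VINF_CPUM_HOST_CR0_MODIFIED)
        hmR0VmxUpdateHostCr0Cache(pVCpu);   /* hypothetical: refresh HM's cached host CR0 */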
trunk/src/VBox/VMM/include/CPUMInternal.mac (r61317 → r61348)

 %define CPUM_IS_AMD64 0
 %endif
 
-
-;; @def CPUM_CAN_USE_FPU_IN_R0
-; Indicates that we can use the FPU directly in ring-0.
-; Only defined in ring-0.
-%ifdef VBOX_WITH_KERNEL_USING_XMM
- ; Systems using XMM registers as part of their kernel calling convention must
- ; support saving and restoring the state while in ring-0.  64-bit Windows will
- ; always switch the FPU state when context switching.
- %define CPUM_CAN_USE_FPU_IN_R0 1
-%endif
-%ifdef RT_OS_WINDOWS
- ; 32-bit Windows will load the FPU context of the current thread (user land).
- %define CPUM_CAN_USE_FPU_IN_R0 1
-%endif
-%ifdef RT_OS_DARWIN
- ; Intel Darwin kernels will load the FPU context of the current thread (user land).
- ;; @todo we still need to check CR0 and tell HMVMX when CR0 changes!
- ;%define CPUM_CAN_USE_FPU_IN_R0 1
-%endif
-%ifdef RT_OS_LINUX
- ; Intel Linux kernels will load the FPU context of the current thread (user land),
- ; at least that's what my LXR research on 2.6.18+ indicates.  It's possible this
- ; was done differently at some point; I seem to recall issues with it ages ago.
- ;; @todo We still need to check CR0 and tell HMVMX when CR0 changes!
- ;%define CPUM_CAN_USE_FPU_IN_R0 1
-%endif
-%ifndef IN_RING0
- %undef CPUM_CAN_USE_FPU_IN_R0
-%endif
 
…
 
 ;;
-; Clears CR0.TS and CR0.EM if necessary, saving the previous result.
-;
-; This is used to avoid FPU exceptions when touching the FPU state.
-;
-; @param    %1      Register to save the old CR0 in (pass to RESTORE_CR0).
-; @param    %2      Temporary scratch register.
-; @uses     EFLAGS, CR0
-;
-%macro SAVE_CR0_CLEAR_FPU_TRAPS 2
-        xor     %1, %1
+; Makes sure we don't trap (#NM) accessing the FPU.
+;
+; In ring-0 this is a bit of work since we may have to try to convince the host
+; kernel to do the work for us; also, we must report any CR0 changes back to
+; HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
+;
+; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
+; value in CPUMCPU.Host.cr0Fpu.  If we don't, we'll store zero there.  (See also
+; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
+;
+; In raw-mode we will always have to clear TS and it will be recalculated
+; elsewhere and thus needs no saving.
+;
+; @param    %1      Register to return the return status code in.
+; @param    %2      Temporary scratch register.
+; @param    %3      Ring-0 only, register pointing to the CPUMCPU structure
+;                   of the EMT we're on.
+; @uses     EFLAGS, CR0, %1, %2
+;
+%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
+ %ifdef IN_RC
+        ;
+        ; raw-mode - always clear it.  We won't be here otherwise.
+        ;
+        mov     %2, cr0
+        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
+        mov     cr0, %2
+
+ %else
+        ;
+        ; ring-0 - slightly complicated.
+        ;
+        xor     %1, %1                  ; 0 / VINF_SUCCESS.  Wishing for no CR0 changes.
+        mov     [%3 + CPUMCPU.Host.cr0Fpu], %1
+
         mov     %2, cr0
         test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
-        jz      %%skip_cr0_write
-        mov     %1, %2                  ; Save old CR0
+        jz      %%no_cr0_change
+
+  %ifdef VMM_R0_TOUCH_FPU
+        ; Touch the state and check that the kernel updated CR0 for us.
+        movdqa  xmm0, xmm0
+        mov     %2, cr0
+        test    %2, X86_CR0_TS | X86_CR0_EM
+        jz      %%cr0_changed
+  %endif
+
+        ; Save CR0 and clear the flags ourselves.
+        mov     [%3 + CPUMCPU.Host.cr0Fpu], %2
         and     %2, ~(X86_CR0_TS | X86_CR0_EM)
         mov     cr0, %2
-%%skip_cr0_write:
+ %endif ; IN_RING0
+
+%%cr0_changed:
+        mov     %1, VINF_CPUM_HOST_CR0_MODIFIED
+%%no_cr0_change:
 %endmacro
 
-;;
-; Restore CR0.TS and CR0.EM state if SAVE_CR0_CLEAR_FPU_TRAPS changed it.
-;
-; @param    %1      The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in.
-;
-%macro RESTORE_CR0 1
-        cmp     %1, 0
-        je      %%skip_cr0_restore
+
+;;
+; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
+;
+; @param    %1      The original state to restore (or zero).
+;
+%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
+        test    %1, X86_CR0_TS | X86_CR0_EM
+        jz      %%skip_cr0_restore
         mov     cr0, %1
 %%skip_cr0_restore:
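To make the control flow of the new macro easier to follow, here is a C paraphrase of CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC; it restates the assembly above, with ReadCr0(), WriteCr0() and TouchHostFpu() as stand-in primitives (see the sketch after the CPUMR0A.asm diff):

    /* C paraphrase of the macro above; not VirtualBox code. */
    static int cpumRZTouchFpuClearCr0FpuTrapsSetRc(PCPUMCPU pCpumCpu)
    {
    #ifdef IN_RC
        /* Raw-mode: always clear TS/EM; the value is recalculated elsewhere
         * and thus needs no saving.  A CR0 change is always reported. */
        WriteCr0(ReadCr0() & ~(X86_CR0_TS | X86_CR0_EM));
        return VINF_CPUM_HOST_CR0_MODIFIED;
    #else
        /* Ring-0: wish for no CR0 changes; zero means nothing to restore. */
        pCpumCpu->Host.cr0Fpu = 0;
        uint64_t uCr0 = ReadCr0();
        if (!(uCr0 & (X86_CR0_TS | X86_CR0_EM)))
            return VINF_SUCCESS;                /* FPU already accessible. */

    # ifdef VMM_R0_TOUCH_FPU
        /* Touch the FPU; check whether the host kernel cleared CR0 for us. */
        TouchHostFpu();
        uCr0 = ReadCr0();
        if (!(uCr0 & (X86_CR0_TS | X86_CR0_EM)))
            return VINF_CPUM_HOST_CR0_MODIFIED; /* Kernel did it; nothing to restore. */
    # endif

        /* Clear the trap bits ourselves and remember the original value so
         * CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET can put it back later. */
        pCpumCpu->Host.cr0Fpu = uCr0;
        WriteCr0(uCr0 & ~(X86_CR0_TS | X86_CR0_EM));
        return VINF_CPUM_HOST_CR0_MODIFIED;
    #endif
    }

Note the pairing with the restore macro: CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET only writes CR0 when the saved value actually has TS or EM set, which is why the fast paths can simply leave zero in CPUMCPU.Host.cr0Fpu.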