Changeset 61144 in vbox for trunk/src/VBox/VMM

- Timestamp: May 23, 2016 10:16:26 PM
- svn:sync-xref-src-repo-rev: 107425
- Location: trunk/src/VBox/VMM
- Files: 10 edited
Legend: lines prefixed with '-' were removed in r61144, lines prefixed with '+' were added, unprefixed lines are unchanged context, and '…' marks omitted lines between hunks.
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm (r61068 → r61144)

- ;*******************************************************************************
- ;*  Defined Constants And Macros                                               *
- ;*******************************************************************************
- ;; The offset of the XMM registers in X86FXSTATE.
- ; Use define because I'm too lazy to convert the struct.
- %define XMM_OFF_IN_X86FXSTATE  160
-
  BEGINCODE
 
+ ;;
+ ; Makes sure the EMTs have a FPU state associated with them on hosts where we're
+ ; allowed to use it in ring-0 too.
+ ;
+ ; This ensure that we don't have to allocate the state lazily while trying to execute
+ ; guest code with preemption disabled or worse.
+ ;
+ ; @cproto VMMR0_INT_DECL(void) CPUMR0RegisterVCpuThread(PVMCPU pVCpu);
+ ;
+ BEGINPROC CPUMR0RegisterVCpuThread
+         push    xBP
+         SEH64_PUSH_xBP
+         mov     xBP, xSP
+         SEH64_SET_FRAME_xBP 0
+ SEH64_END_PROLOGUE
+
+ %ifdef CPUM_CAN_USE_FPU_IN_R0
+         movaps  xmm0, xmm0
+ %endif
+
+ .return:
+         xor     eax, eax                    ; paranoia
+         leave
+         ret
+ ENDPROC   CPUMR0RegisterVCpuThread
…
          cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
 
- %ifdef VBOX_WITH_KERNEL_USING_XMM
-         movaps  xmm0, xmm0                  ; Make 100% sure it's used before we save it or mess with CR0/XCR0.
- %endif
-         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX is now old CR0 value, don't use!
-
          ;
          ; Save the host state.
          ;
          test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_HOST
          jnz     .already_saved_host
+
+ %ifndef CPUM_CAN_USE_FPU_IN_R0
+         ; On systems where the kernel doesn't necessarily allow us to use the FPU
+         ; in ring-0 context, we have to disable FPU traps before doing fxsave/xsave
+         ; here.  (xCX is 0 if no CR0 was necessary.)  We leave it like that so IEM
+         ; can use the FPU/SSE/AVX host CPU features directly.
+         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX
+         mov     [pCpumCpu + CPUMCPU.Host.cr0Fpu], xCX
+         ;; @todo What about XCR0?
+ %endif
+
          CPUMR0_SAVE_HOST
+
  %ifdef VBOX_WITH_KERNEL_USING_XMM
          jmp     .load_guest
…
  %endif
 
- ;; @todo Save CR0 + XCR0 bits related to FPU, SSE and AVX*, leaving these register sets accessible to IEM.
-         RESTORE_CR0 xCX
          or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM | CPUM_USED_FPU_HOST)
          popf
…
          pushf                               ; The darwin kernel can get upset or upset things if an
          cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
-         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX is now old CR0 value, don't use!
-
 
  %ifdef VBOX_WITH_KERNEL_USING_XMM
…
          CPUMR0_LOAD_HOST
 
- ;; @todo Restore CR0 + XCR0 bits related to FPU, SSE and AVX* (for IEM).
+ %ifndef CPUM_CAN_USE_FPU_IN_R0
+         ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
+         ; in cpumRZSaveHostFPUState.
+         mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
          RESTORE_CR0 xCX
+ %endif
          and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
…
          pushf                               ; The darwin kernel can get upset or upset things if an
          cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
-         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX is now old CR0 value, don't use!
 
          CPUMR0_LOAD_HOST
 
+ %ifndef CPUM_CAN_USE_FPU_IN_R0
+         ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
+         ; in cpumRZSaveHostFPUState.
+         ;; @todo What about XCR0?
+         mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
          RESTORE_CR0 xCX
+ %endif
          and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_HOST
          popf
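The heart of this file's change is how the CR0 FPU-trap bits (CR0.TS/CR0.EM) are handled around the xsave/fxsave of the host state: instead of unconditionally clearing and immediately restoring CR0, the new code only touches CR0 when CPUM_CAN_USE_FPU_IN_R0 is not defined, stashes the old value in CPUMCPU.Host.cr0Fpu, and leaves the traps cleared until the host state is reloaded so IEM can use FPU/SSE/AVX directly. A minimal standalone C sketch of that CR0 bracket follows; read_cr0()/write_cr0() are hypothetical stand-ins for the real SAVE_CR0_CLEAR_FPU_TRAPS/RESTORE_CR0 assembly macros, and the return-0-when-unchanged convention is inferred from the "(xCX is 0 if no CR0 was necessary)" comment in the diff.

    #include <stdint.h>

    #define X86_CR0_EM  UINT32_C(0x00000004)    /* emulate FPU: FPU/SSE insns trap */
    #define X86_CR0_TS  UINT32_C(0x00000008)    /* task switched: FPU insns raise #NM */

    /* Hypothetical accessors standing in for the CR0 read/write intrinsics. */
    extern uint32_t read_cr0(void);
    extern void     write_cr0(uint32_t uCr0);

    /* Sketch of SAVE_CR0_CLEAR_FPU_TRAPS: clear the bits that would make the
     * following fxsave/xsave (or later FPU/SSE/AVX use by IEM) trap, and return
     * the old CR0 so it can be stashed in CPUMCPU.Host.cr0Fpu.  Returns 0 when
     * nothing had to be changed, so the restore side can skip the CR0 write. */
    static uint32_t save_cr0_clear_fpu_traps(void)
    {
        uint32_t uCr0Old = read_cr0();
        if (!(uCr0Old & (X86_CR0_TS | X86_CR0_EM)))
            return 0;                            /* nothing to restore later */
        write_cr0(uCr0Old & ~(X86_CR0_TS | X86_CR0_EM));
        return uCr0Old;
    }

    /* Sketch of RESTORE_CR0, used on the load-host path once the host FPU
     * state is back on the CPU. */
    static void restore_cr0(uint32_t uCr0Saved)
    {
        if (uCr0Saved)
            write_cr0(uCr0Saved);
    }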
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp (r59747 → r61144)

                  gvmmR0CreateDestroyUnlock(pGVMM);
 
+                 CPUMR0RegisterVCpuThread(&pVM->aCpus[0]);
+
                  *ppVM = pVM;
                  Log(("GVMMR0CreateVM: pVM=%p pVMR3=%p pGVM=%p hGVM=%d\n", pVM, pVM->pVMR3, pGVM, iHandle));
…
      pVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = RTThreadNativeSelf();
 
-     return VMMR0ThreadCtxHookCreateForEmt(&pVM->aCpus[idCpu]);
+     rc = VMMR0ThreadCtxHookCreateForEmt(&pVM->aCpus[idCpu]);
+     if (RT_SUCCESS(rc))
+         CPUMR0RegisterVCpuThread(&pVM->aCpus[idCpu]);
+     return rc;
  }
trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r60850 → r61144)

      return VBOXSTRICTRC_VAL(rcStrict);
  }
+
+
+ /**
+  * Notification from CPUM that it has unloaded the guest FPU/SSE/AVX state from
+  * the host CPU and that guest access to it must be intercepted.
+  *
+  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
+  */
+ VMMR0_INT_DECL(void) HMR0NotifyCpumUnloadedGuestFpuState(PVMCPU pVCpu)
+ {
+     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
+ }
+
 
  #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
trunk/src/VBox/VMM/VMMR3/VMM.cpp (r60847 → r61144)

          PRINT_FLAG(VMCPU_FF_,CSAM_SCAN_PAGE);
          PRINT_FLAG(VMCPU_FF_,CSAM_PENDING_ACTION);
+         PRINT_FLAG(VMCPU_FF_,CPUM);
  #endif
          if (f)
trunk/src/VBox/VMM/VMMRC/CPUMRC.cpp (r58123 → r61144)

  #include <iprt/assert.h>
  #include <VBox/log.h>
+ #include <iprt/asm-amd64-x86.h>
 
 
…
  #endif /* VBOX_WITH_RAW_RING1 */
 
+
+ /**
+  * Called by trpmGCExitTrap when VMCPU_FF_CPUM is set (by CPUMRZ.cpp).
+  *
+  * We can be called unecessarily here if we returned to ring-3 for some other
+  * reason before we tried to resume executed guest code.  This is detected and
+  * ignored.
+  *
+  * @param   pVCpu   The cross context CPU structure for the calling EMT.
+  */
+ VMMRCDECL(void) CPUMRCProcessForceFlag(PVMCPU pVCpu)
+ {
+     /* Only modify CR0 if we're in the post IEM state (host state saved, guest no longer active). */
+     if ((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)) == CPUM_USED_FPU_HOST)
+     {
+         /*
+          * Doing the same CR0 calculation as in AMD64andLegacy.mac so that we'll
+          * catch guest FPU accesses and load the FPU/SSE/AVX register state as needed.
+          */
+         uint32_t cr0 = ASMGetCR0();
+         cr0 |= pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM;
+         cr0 |= X86_CR0_TS | X86_CR0_MP;
+         ASMSetCR0(cr0);
+     }
+ }
+
trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp (r61015 → r61144)

                               | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM  | VMCPU_FF_SELM_SYNC_GDT
                               | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT
-                              | VMCPU_FF_IOM
+                              | VMCPU_FF_IOM | VMCPU_FF_CPUM
                               )
                            )
…
          APICUpdatePendingInterrupts(pVCpu);
  #endif
+         if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_CPUM))
+             CPUMRCProcessForceFlag(pVCpu);
+
          /* Pending Ring-3 action. */
          if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM | VMCPU_FF_IOM))
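The dispatch added here relies on VMCPU_FF_TEST_AND_CLEAR so that CPUMRCProcessForceFlag runs at most once per posting of VMCPU_FF_CPUM, even if this exit path is entered again before the flag is re-set. A small illustrative sketch of that test-and-clear pattern in C11; the flag value and structure are invented for the example, the real macros operate on the VMCPU force-action word.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Invented flag value for illustration; the real VMCPU_FF_CPUM bit is defined elsewhere. */
    #define FF_CPUM  UINT32_C(0x00010000)

    /* Minimal stand-in for the per-VCPU force-flag word. */
    typedef struct VCPUSKETCH { _Atomic uint32_t fForceFlags; } VCPUSKETCH;

    /* Same idea as VMCPU_FF_TEST_AND_CLEAR: atomically clear the bit and report
     * whether it was set, so the handler runs at most once per posting even if
     * the flag can be set again from another context. */
    static bool ff_test_and_clear(VCPUSKETCH *pVCpu, uint32_t fFlag)
    {
        uint32_t fOld = atomic_fetch_and(&pVCpu->fForceFlags, ~fFlag);
        return (fOld & fFlag) != 0;
    }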
trunk/src/VBox/VMM/VMMRZ/CPUMRZ.cpp (r61068 → r61144)

          case 0:
              cpumRZSaveHostFPUState(&pVCpu->cpum.s);
+ #ifdef IN_RC
+             VMCPU_FF_SET(pVCpu, VMCPU_FF_CPUM); /* Must recalc CR0 before executing more code! */
+ #endif
              break;
 
…
              {
                  pVCpu->cpum.s.fUseFlags &= ~CPUM_SYNC_FPU_STATE;
-                 /** @todo tell HM! */
+                 HMR0NotifyCpumUnloadedGuestFpuState(pVCpu);
              }
  #endif
…
 
          case CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST:
-             /** @todo tell HM! */
  #if defined(IN_RING0) && ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
              Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
…
              else
  #endif
-                 cpumRZSaveGuestFpuState(&pVCpu->cpum.s);
-
+                 cpumRZSaveGuestFpuState(&pVCpu->cpum.s, true /*fLeaveFpuAccessible*/);
+ #ifdef IN_RING0
+             HMR0NotifyCpumUnloadedGuestFpuState(pVCpu);
+ #endif
              break;
 
…
              else
  #endif
-                 cpumRZSaveGuestFpuState(&pVCpu->cpum.s);
+                 cpumRZSaveGuestFpuState(&pVCpu->cpum.s, false /*fLeaveFpuAccessible*/);
              pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU_GUEST;
          }
…
  #if defined(VBOX_WITH_KERNEL_USING_XMM) && HC_ARCH_BITS == 64
      NOREF(pVCpu);
+ #error "do NOT commit this"
  #else
      if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
…
          else
  # endif
+         {
+             RTLogPrintf("calling cpumRZSaveGuestSseRegisters\n");
              cpumRZSaveGuestSseRegisters(&pVCpu->cpum.s);
+         }
      }
  #endif
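For orientation, the switch shown above keys on which FPU state is currently loaded on the host CPU, tracked by the CPUM_USED_FPU_GUEST and CPUM_USED_FPU_HOST bits in fUseFlags. A much-simplified C sketch of that decision follows; the flag values and helper names are illustrative only, the real flag updates happen inside the assembly workers, and the "guest state unloaded" notification is HMR0NotifyCpumUnloadedGuestFpuState in ring-0 versus the VMCPU_FF_CPUM force flag in raw-mode context.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative flag values; the real CPUM_USED_FPU_* constants live in CPUMInternal.h. */
    #define USED_FPU_GUEST  UINT32_C(0x0001)
    #define USED_FPU_HOST   UINT32_C(0x0002)

    /* Hypothetical helpers standing in for the assembly workers and the notifications. */
    extern void saveHostFpuState(void *pCpumCpu);
    extern void saveGuestFpuState(void *pCpumCpu, bool fLeaveFpuAccessible);
    extern void notifyGuestFpuUnloaded(void *pVCpu);

    /* Rough shape of the "prepare the host CPU for FPU use" decision: save the
     * host state exactly once, and if the guest state is live on the CPU, push
     * it out and tell HM/raw-mode to start intercepting guest FPU access again. */
    static void prepareHostCpuForFpuUse(void *pVCpu, void *pCpumCpu, uint32_t *pfUseFlags)
    {
        switch (*pfUseFlags & (USED_FPU_GUEST | USED_FPU_HOST))
        {
            case 0:                                 /* nothing saved or loaded yet */
                saveHostFpuState(pCpumCpu);
                *pfUseFlags |= USED_FPU_HOST;
                break;

            case USED_FPU_HOST:                     /* host already saved, nothing to do */
                break;

            case USED_FPU_GUEST | USED_FPU_HOST:    /* guest state is live on the CPU */
                saveGuestFpuState(pCpumCpu, true /*fLeaveFpuAccessible*/);
                *pfUseFlags &= ~USED_FPU_GUEST;
                notifyGuestFpuUnloaded(pVCpu);
                break;
        }
    }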
trunk/src/VBox/VMM/VMMRZ/CPUMRZA.asm (r61112 → r61144)

  ; Saves the host FPU/SSE/AVX state.
  ;
+ ; Will return with CR0.EM and CR0.TS cleared!  This is the normal state in
+ ; ring-0, whereas in raw-mode the caller will probably set VMCPU_FF_CPUM to
+ ; re-evaluate the situation before executing more guest code.
+ ;
  ; @returns  VINF_SUCCESS (0) in EAX
  ; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx  CPUMCPU pointer
…
          pushf                               ; The darwin kernel can get upset or upset things if an
          cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
- %ifdef VBOX_WITH_KERNEL_USING_XMM
+
+ %ifndef CPUM_CAN_USE_FPU_IN_R0
+         ;
+         ; In raw-mode context and on systems where the kernel doesn't necessarily
+         ; allow us to use the FPU in ring-0 context, we have to disable FPU traps
+         ; before doing fxsave/xsave here.  (xCX is 0 if no CR0 was necessary.)  We
+         ; leave it like that so IEM can use the FPU/SSE/AVX host CPU features directly.
+         ;
+         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX
+         ;; @todo What about XCR0?
   %ifdef IN_RING0
-         movaps  xmm0, xmm0                  ; Make 100% sure it's used before we save it or mess with CR0/XCR0.
-  %endif
- %endif
-         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX is now old CR0 value, don't use!
-
+         mov     [pCpumCpu + CPUMCPU.Host.cr0Fpu], xCX
+  %else
+   %error "Huh?"
+  %endif
+ %endif
+         ;
+         ; Save the host state (xsave/fxsave will cause thread FPU state to be
+         ; loaded on systems where we are allowed to use it in ring-0.
+         ;
          CPUMR0_SAVE_HOST
- ;; @todo Save CR0 + XCR0 bits related to FPU, SSE and AVX*, leaving these register sets accessible to IEM.
-
-         RESTORE_CR0 xCX
+
          or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_HOST | CPUM_USED_FPU_SINCE_REM) ; Latter is not necessarily true, but normally yes.
          popf
…
  ;
  ; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx  CPUMCPU pointer
+ ; @param    fLeaveFpuAccessible  x86:[ebp+c] gcc:sil msc:dl  Whether to restore CR0 and XCR0 on
+ ;                                                            the way out.  Only really applicable to RC.
  ;
  align 16
…
          pushf                               ; The darwin kernel can get upset or upset things if an
          cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
-         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX is now old CR0 value, don't use!
 
+ %ifdef IN_RC
+         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX must be preserved until CR0 is restored!
+ %endif
 
  %ifndef VBOX_WITH_KERNEL_USING_XMM
…
  %endif
 
+         and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_GUEST
+ %ifdef IN_RC
+         test    byte [ebp + 0ch], 1         ; fLeaveFpuAccessible
+         jz      .no_cr0_restore
          RESTORE_CR0 xCX
-         and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_GUEST
-
+ .no_cr0_restore:
+ %endif
          popf
  %ifdef RT_ARCH_X86
…
  ;;
  ; Saves the guest XMM0..15 registers.
+ ;
+ ; The purpose is to actualize the register state for read-only use, so CR0 is
+ ; restored in raw-mode context (so, the FPU/SSE/AVX CPU features can be
+ ; inaccessible upon return).
  ;
  ; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx  CPUMCPU pointer
…
  %else
   %error "Invalid context!"
+ %endif
+
+ %ifdef IN_RC
+         ; Temporarily grant access to the SSE state. xDX must be preserved until CR0 is restored!
+         SAVE_CR0_CLEAR_FPU_TRAPS xDX, xAX
  %endif
 
…
          movdqa  [xCX + X86FXSTATE.xmm15], xmm15
  %endif
+
+ %ifdef IN_RC
+         RESTORE_CR0 xDX                     ; Restore CR0 if we changed it above.
+ %endif
+
  %endif ; !VBOX_WITH_KERNEL_USING_XMM
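The raw-mode-only bracket added around the XMM register copy (SAVE_CR0_CLEAR_FPU_TRAPS xDX ... RESTORE_CR0 xDX) follows the same pattern as the ring-0 changes: briefly make the SSE unit accessible, copy the registers out for read-only use, then put CR0 back so the lazy trap-on-first-use handling keeps working. Below is a tiny C-style outline of that flow, reusing the hypothetical helpers sketched after CPUMR0A.asm above; the register copy itself obviously needs the movdqa sequence shown in the diff.

    #include <stdint.h>

    /* Hypothetical helpers; see the CR0 sketch after CPUMR0A.asm above. */
    extern uint32_t save_cr0_clear_fpu_traps(void);
    extern void     restore_cr0(uint32_t uCr0Saved);
    /* Stand-in for the movdqa xmm0..xmm7 (xmm0..xmm15 on 64-bit) store sequence. */
    extern void     copy_xmm_regs_to_state(void *pXmmArea);

    /* Outline of the raw-mode path of cpumRZSaveGuestSseRegisters after this change. */
    static void sketch_SaveGuestSseRegisters_rc(void *pXmmArea)
    {
        uint32_t uCr0Saved = save_cr0_clear_fpu_traps();    /* temporarily grant SSE access */
        copy_xmm_regs_to_state(pXmmArea);                   /* actualize the guest XMM registers */
        restore_cr0(uCr0Saved);                             /* leave CR0 as it was (read-only use) */
    }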
trunk/src/VBox/VMM/include/CPUMInternal.h (r61068 → r61144)

      uint32_t                        cr3;
      uint32_t                        cr4;
+     /** The CR0 FPU state in HM mode.  Can't use cr0 here because the
+      * 64-bit-on-32-bit-host world switches is using it. */
+     uint32_t                        cr0Fpu;
      /** @} */
 
…
      RTSEL                           tr;
      RTSEL                           trPadding;
-     uint32_t                        SysEnterPadding;
 
      /** The sysenter msr registers.
…
      /** Control registers.
       * @{ */
+     /** The CR0 FPU state in HM mode. */
      uint64_t                        cr0;
      /*uint64_t                      cr2; - scratch*/
…
      /** @} */
 
-     /* padding to get 32byte aligned size */
+     /* padding to get 64byte aligned size */
      uint8_t                         auPadding[4];
 
…
  # if defined(IN_RC) || defined(IN_RING0)
  DECLASM(void)       cpumRZSaveHostFPUState(PCPUMCPU pCPUM);
- DECLASM(void)       cpumRZSaveGuestFpuState(PCPUMCPU pCPUM);
+ DECLASM(void)       cpumRZSaveGuestFpuState(PCPUMCPU pCPUM, bool fLeaveFpuAccessible);
  DECLASM(void)       cpumRZSaveGuestSseRegisters(PCPUMCPU pCPUM);
  # endif
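A note on the new Host.cr0Fpu member: on 32-bit hosts it has to be a separate uint32_t because the 64-bit-on-32-bit world switcher already owns Host.cr0, while on 64-bit hosts the .mac change in the next file simply places the .Host.cr0Fpu label at the same offset as .Host.cr0. In C terms the 64-bit layout behaves roughly like the union below (illustrative only, not the actual declaration):

    #include <stdint.h>

    /* Illustrative: on 64-bit hosts cr0Fpu aliases the same 8 bytes as cr0, so the
     * FPU-trap save/restore code and the world switcher share one storage slot. */
    typedef union HOSTCR0SKETCH
    {
        uint64_t cr0;       /* saved host CR0 */
        uint64_t cr0Fpu;    /* same storage, viewed as "the CR0 FPU state in HM mode" */
    } HOSTCR0SKETCH;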
trunk/src/VBox/VMM/include/CPUMInternal.mac (r61109 → r61144)

  %define CPUM_IS_AMD64 0
  %endif
+
+ ;; @def CPUM_CAN_USE_FPU_IN_R0
+ ; Indicates that we can use the FPU directly in ring-0.
+ ; Only defined in ring-0.
+ %ifdef VBOX_WITH_KERNEL_USING_XMM
+  ; Systems using XMM registers as part of their kernel calling convention must
+  ; support saving and restoring the state while in ring-0.  64-bit Windows will
+  ; always switch the FPU state when context switching.
+  %define CPUM_CAN_USE_FPU_IN_R0 1
+ %endif
+ %ifdef RT_OS_WINDOWS
+  ; 32-bit Windows will load the FPU context of the current thread (user land).
+  %define CPUM_CAN_USE_FPU_IN_R0 1
+ %endif
+ %ifdef RT_OS_DARWIN
+  ; Intel Darwin kernels will load the FPU context of the current thread (user land).
+  %define CPUM_CAN_USE_FPU_IN_R0 1
+ %endif
+ %ifdef RT_OS_LINUX
+  ; Intel Linux kernels will load the FPU context of the current thread (user land),
+  ; at least that what my LXR research on 2.6.18+ indicates.  It's possible this was
+  ; done differently at some point, I seems to recall issues with it ages and ages ago.
+  ; %define CPUM_CAN_USE_FPU_IN_R0 1 - test me first
+ %endif
+ %ifndef IN_RING0
+  %undef CPUM_CAN_USE_FPU_IN_R0
+ %endif
+
 
 
…
      .Host.cr3             resd    1
      .Host.cr4             resd    1
+     .Host.cr0Fpu          resd    1
 
      .Host.dr0             resd    1
…
      .Host.trPadding       resw    1
 
-     .Host.SysEnterPadding resd    1
+     alignb 8
      .Host.SysEnter.cs     resq    1
      .Host.SysEnter.eip    resq    1
…
  %else ; 64-bit
 
+     .Host.cr0Fpu:
      .Host.cr0             resq    1
      ;.Host.cr2            resq    1 - scratch
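For readers more at home in C than NASM, the selection above amounts to roughly the following preprocessor logic (illustrative only; the assembler definition above is authoritative, and Linux is deliberately left out pending testing):

    /* Rough C equivalent of the CPUM_CAN_USE_FPU_IN_R0 selection (illustrative). */
    #if defined(IN_RING0) \
     && (   defined(VBOX_WITH_KERNEL_USING_XMM) /* kernel calling convention already saves/restores XMM */ \
         || defined(RT_OS_WINDOWS)              /* thread FPU context is kept loaded for ring-0 */ \
         || defined(RT_OS_DARWIN))              /* ditto for Intel Darwin kernels */
    # define CPUM_CAN_USE_FPU_IN_R0 1
    #endif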