Changeset 16113 in vbox for trunk/src/VBox/VMM
- Timestamp: Jan 21, 2009 9:08:29 AM
- Location:
- trunk/src/VBox/VMM
- Files: 4 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/CPUMInternal.h
r16108 r16113 392 392 DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM); 393 393 DECLASM(int) cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM); 394 DECLASM(int) cpumR0SaveHostFPUState(PCPUMCPU pCPUM); 394 395 DECLASM(int) cpumR0RestoreHostFPUState(PCPUMCPU pCPUM); 395 396 DECLASM(void) cpumR0LoadFPU(PCPUMCTX pCtx); -
trunk/src/VBox/VMM/PGM.cpp
r15647 r16113 1375 1375 AssertRelease(pVM->pgm.s.HCPhysInterPaePDPT != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPT & PAGE_OFFSET_MASK)); 1376 1376 pVM->pgm.s.HCPhysInterPaePML4 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePML4); 1377 AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK) );1377 AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK) && pVM->pgm.s.HCPhysInterPaePML4 < 0xffffffff); 1378 1378 1379 1379 /* -
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r16111 r16113 171 171 if (CPUMIsGuestInLongModeEx(pCtx)) 172 172 { 173 /* Save/Restore the state on entry as we need to be in 64 bits mode to access the full state. */ 174 pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE | CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM; 175 /** @todo who is saving the host state?? */ 173 /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */ 174 cpumR0SaveHostFPUState(&pVCpu->cpum.s); 175 176 /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */ 177 pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE; 176 178 } 177 179 else … … 263 265 } 264 266 265 pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM; /** @todo clean up, this is done above by the ASM worker. */267 Assert(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)); 266 268 return VINF_SUCCESS; 267 269 } … … 286 288 { 287 289 if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE)) 290 { 288 291 HWACCMR0SaveFPUState(pVM, pVCpu, pCtx); 289 cpumR0RestoreHostFPUState(&pVCpu->cpum.s); 292 cpumR0RestoreHostFPUState(&pVCpu->cpum.s); 293 } 294 /* else nothing to do; we didn't perform a world switch */ 290 295 } 291 296 else -
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm
r16109 r16113 66 66 67 67 ;; 68 ; Saves the guest FPU/XMM state and restores the host one.68 ; Saves the host FPU/XMM state and restores the guest state. 69 69 ; 70 70 ; @returns 0 … … 99 99 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL 100 100 101 fxsave [xDX + CPUMCPU.Host.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor 101 fxsave [xDX + CPUMCPU.Host.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption) 102 102 fxrstor [xDX + CPUMCPU.Guest.fpu] 103 103 … … 122 122 ENDPROC cpumR0SaveHostRestoreGuestFPUState 123 123 124 125 ;; 126 ; Saves the guest FPU/XMM state and restores the host one. 124 %ifndef RT_ARCH_AMD64 125 %ifdef VBOX_WITH_64_BITS_GUESTS 126 %ifndef VBOX_WITH_HYBRID_32BIT_KERNEL 127 ;; 128 ; Saves the host FPU/XMM state 129 ; 130 ; @returns 0 131 ; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer 132 ; 133 align 16 134 BEGINPROC cpumR0SaveHostFPUState 135 mov xDX, dword [esp + 4] 136 137 ; Switch the state. 138 or dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) 139 140 mov xAX, cr0 ; Make sure its safe to access the FPU state. 141 mov xCX, xAX ; save old CR0 142 and xAX, ~(X86_CR0_TS | X86_CR0_EM) 143 mov cr0, xAX ;; @todo optimize this. 144 145 fxsave [xDX + CPUMCPU.Host.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption) 146 147 mov cr0, xCX ; and restore old CR0 again ;; @todo optimize this. 148 xor eax, eax 149 ret 150 ENDPROC cpumR0SaveHostFPUState 151 %endif 152 %endif 153 %endif 154 155 ;; 156 ; Saves the guest FPU/XMM state and restores the host state. 127 157 ; 128 158 ; @returns 0 … … 159 189 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL 160 190 161 fxsave [xDX + CPUMCPU.Guest.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor 191 fxsave [xDX + CPUMCPU.Guest.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption) 162 192 fxrstor [xDX + CPUMCPU.Host.fpu] 163 193
Note: See TracChangeset for help on using the changeset viewer.