Changeset 20539 in vbox
Timestamp:
    Jun 13, 2009 9:22:54 PM
File:
    1 copied
trunk/src/VBox/VMM/VMMR0/CPUMR0UnusedA.asm (r20536 → r20539)
Unchanged context:

extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)

Removed (r20536 lines 47–282):

%endif


;*******************************************************************************
;*   Global Variables                                                          *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE btw.
; but that's not relevant atm.)
GLOBALNAME g_fCPUMIs64bitHost
        dd      NAME(SUPR0AbsIs64bit)
%endif


BEGINCODE


;;
; Saves the host FPU/XMM state and restores the guest state.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Switch the state.
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
        fxrstor [xDX + CPUMCPU.Guest.fpu]

.done:
        mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
        popf
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh
        fxsave  [rdx + CPUMCPU.Host.fpu]
        fxrstor [rdx + CPUMCPU.Guest.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0SaveHostRestoreGuestFPUState


%ifndef RT_ARCH_AMD64
%ifdef  VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostFPUState
        mov     xDX, dword [esp + 4]

        ; Switch the state.
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                ;; @todo optimize this.

        fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)

        mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
        xor     eax, eax
        ret
ENDPROC   cpumR0SaveHostFPUState
%endif
%endif
%endif
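A note on the trick documented at g_fCPUMIs64bitHost above: because the flag lives in memory as plain data, the hybrid-kernel check is a single cmp with a memory operand, so no general-purpose register has to be freed up and restored just to test whether the host is in 64-bit mode. Below is a minimal sketch of the idiom, using a hypothetical symbol name and a constant initializer in place of the absolute symbol NAME(SUPR0AbsIs64bit):

BITS 32
SECTION .data
g_fIs64bitHost: dd 1                    ; hypothetical stand-in for g_fCPUMIs64bitHost

SECTION .text
check_host_mode:
        ; Compare straight against memory: EFLAGS is set directly from
        ; the operand, no register is clobbered, nothing to preserve.
        cmp     byte [g_fIs64bitHost], 0
        jnz     .host_is_64bit
        ; ... 32-bit host path ...
        ret
.host_is_64bit:
        ; ... far-jump to the 64-bit code segment, as in the routines above ...
        ret

The alternative, loading the flag into a register before testing it, would clobber that register and force an extra save/restore around the check.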
Removed (r20536 lines 47–282, continued):

;;
; Saves the guest FPU/XMM state and restores the host state.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif

        ; Only restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxsave  [xDX + CPUMCPU.Guest.fpu]   ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
        fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
        mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf
.fpu_not_used:
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh
        fxsave  [rdx + CPUMCPU.Guest.fpu]
        fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0SaveGuestRestoreHostFPUState


;;
; Sets the host's FPU/XMM state
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif

        ; Restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        mov     xAX, cr0
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
        mov     cr0, xCX                ; and restore old CR0 again
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf
.fpu_not_used:
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh
        fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0RestoreHostFPUState
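The db 0xea sequences in the hybrid-kernel paths above hand-assemble a direct far jump, jmp far ptr16:32: opcode byte 0xEA, a 4-byte offset, then a 2-byte code-segment selector. It is emitted by hand because the selector, NAME(SUPR0Abs64bitKernelCS), is an external absolute symbol rather than an assemble-time constant; note that the routines store it with dd, leaving two dead bytes that are never reached behind the unconditional jump. A minimal sketch of the thunk, with a hypothetical constant selector in place of the absolute symbol:

BITS 32
thunk_to_long_mode:
        db      0xea                    ; opcode: jmp far ptr16:32
        dd      .long_mode              ; 4-byte offset into the target segment
        dw      0x0008                  ; 2-byte selector; hypothetical 64-bit kernel CS

BITS 64
.long_mode:
        ; Registers written by the 32-bit code have undefined upper
        ; halves here, hence the explicit zero-extension of the
        ; pointer argument, exactly as in the stubs above.
        and     edx, 0ffffffffh
        ; ... 64-bit work, then far-jump back (see the note after the listing) ...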
Added (r20539 lines 47–48):

extern NAME(g_fCPUMIs64bitHost)
%endif

[…]

Unchanged context:

ENDPROC cpumR0GetMXCSR

Removed (r20536 lines 612–727):

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi
 %endif
%else
        mov     xCX, dword [esp + 4]
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

        ;
        ; Do the job.
        ;
        mov     xAX, dr0
        mov     xDX, dr1
        mov     [xCX], xAX
        mov     [xCX + 8 * 1], xDX
        mov     xAX, dr2
        mov     xDX, dr3
        mov     [xCX + 8 * 2], xAX
        mov     [xCX + 8 * 3], xDX

.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh

        mov     rax, dr0
        mov     rdx, dr1
        mov     r8, dr2
        mov     r9, dr3
        mov     [rcx], rax
        mov     [rcx + 8 * 1], rdx
        mov     [rcx + 8 * 2], r8
        mov     [rcx + 8 * 3], r9
        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0SaveDRx


;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi
 %endif
%else
        mov     xCX, dword [esp + 4]
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

        ;
        ; Do the job.
        ;
        mov     xAX, [xCX]
        mov     xDX, [xCX + 8 * 1]
        mov     dr0, xAX
        mov     dr1, xDX
        mov     xAX, [xCX + 8 * 2]
        mov     xDX, [xCX + 8 * 3]
        mov     dr2, xAX
        mov     dr3, xDX

.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh

        mov     rax, [rcx]
        mov     rdx, [rcx + 8 * 1]
        mov     r8, [rcx + 8 * 2]
        mov     r9, [rcx + 8 * 3]
        mov     dr0, rax
        mov     dr1, rdx
        mov     dr2, r8
        mov     dr3, r9
        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0LoadDRx

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
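Each 64-bit stub above, the DRx routines included, returns through its .fpret data: an indirect jmp far [mem] whose m16:32 operand (a 4-byte offset followed by a 2-byte selector) is addressed RIP-relatively and sends execution back to the routine's .done label in the 32-bit code segment. A minimal sketch, with a hypothetical selector standing in for the absolute symbol NAME(SUPR0AbsKernelCS):

BITS 64
return_to_32bit:
        ; Load CS:EIP from the m16:32 pointer stored just below;
        ; 'wrt rip' makes the memory reference RIP-relative.
        jmp far [.fpret wrt rip]
.fpret:
        dd      .back_in_32bit          ; 4-byte offset
        dw      0x0010                  ; 2-byte selector; hypothetical 32-bit kernel CS
BITS 32
.back_in_32bit:
        ret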