VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 48749

Last change on this file since 48749 was 48567, checked in by vboxsync, 11 years ago

CPUMR0: Avoid EFER writes whenever possible. Don't know which kernels actually set the EFER.FFXSR bit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 12.9 KB
Line 
1; $Id: CPUMR0A.asm 48567 2013-09-19 22:51:40Z vboxsync $
2;; @file
3; CPUM - Guest Context Assembly Routines.
4;
5
6;
7; Copyright (C) 2006-2013 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/vmm/vm.mac"
23%include "VBox/err.mac"
24%include "VBox/vmm/stam.mac"
25%include "CPUMInternal.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28
29%ifdef IN_RING3
30 %error "The jump table doesn't link on leopard."
31%endif
32
33;*******************************************************************************
34;* Defined Constants And Macros *
35;*******************************************************************************
36;; The offset of the XMM registers in X86FXSTATE.
37; Use define because I'm too lazy to convert the struct.
38%define XMM_OFF_IN_X86FXSTATE 160
39
40
41;*******************************************************************************
42;* External Symbols *
43;*******************************************************************************
44%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
45extern NAME(SUPR0AbsIs64bit)
46extern NAME(SUPR0Abs64bitKernelCS)
47extern NAME(SUPR0Abs64bitKernelSS)
48extern NAME(SUPR0Abs64bitKernelDS)
49extern NAME(SUPR0AbsKernelCS)
50%endif
51
52
53;*******************************************************************************
54;* Global Variables *
55;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Mirror of the SUPR0AbsIs64bit absolute symbol, stored as data so code can
; cmp/test it directly in memory without needing to clobber a register.
; Non-zero when the host kernel is actually running in 64-bit (long) mode
; beneath this 32-bit ring-0 code.
; (This trick doesn't quite work for PE btw. but that's not relevant atm.)
GLOBALNAME g_fCPUMIs64bitHost
        dd      NAME(SUPR0AbsIs64bit)
%endif
65
66
67BEGINCODE
68
69
;;
; Saves the host FPU/XMM state and restores the guest state.
;
; Interrupts are disabled (pushf/cli ... popf) for the duration because the
; darwin kernel can get upset, or upset things, if an interrupt occurs while
; we're doing fxsave/fxrstor/cr0 fiddling.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4]  GCC:rdi  MSC:rcx     CPUMCPU pointer
;
; Register use: xDX = pCPUMCPU, xCX = saved CR0, xAX = scratch.
;
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx                ; MSC x64: 1st arg in rcx.
 %else
        mov     xDX, rdi                ; SysV x64: 1st arg in rdi.
 %endif
%else
        mov     xDX, dword [esp + 4]    ; x86 cdecl: 1st arg on the stack.
%endif
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Switch the state: mark the FPU as in use (guest state loaded).
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        ; Clear CR0.TS and CR0.EM so touching the FPU state cannot trap (#NM/#UD).
        mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 32-bit ring-0 on a 64-bit kernel: far-jump into the 64-bit code
        ; segment so we can use REX-prefixed fxsave/fxrstor (full 64-bit state).
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; hand-assembled: jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef RT_ARCH_AMD64
        ; Use explicit REX prefix. See @bugref{6398}.
        o64 fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
        o64 fxrstor [xDX + CPUMCPU.Guest.fpu]
%else
        fxsave  [xDX + CPUMCPU.Host.fpu]        ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
        fxrstor [xDX + CPUMCPU.Guest.fpu]
%endif

%ifdef VBOX_WITH_KERNEL_USING_XMM
        ; Restore the non-volatile xmm registers. ASSUMING 64-bit windows
        ; (xmm6-xmm15 are callee-saved in the MSC x64 ABI). This reloads the
        ; host values saved by the fxsave above, overwriting the guest xmm6-15
        ; just loaded by fxrstor; the guest's xmm6-15 are presumably managed by
        ; the world-switch code when this config is active -- TODO confirm.
        lea     r11, [xDX + CPUMCPU.Host.fpu + XMM_OFF_IN_X86FXSTATE]
        movdqa  xmm6,  [r11 + 060h]
        movdqa  xmm7,  [r11 + 070h]
        movdqa  xmm8,  [r11 + 080h]
        movdqa  xmm9,  [r11 + 090h]
        movdqa  xmm10, [r11 + 0a0h]
        movdqa  xmm11, [r11 + 0b0h]
        movdqa  xmm12, [r11 + 0c0h]
        movdqa  xmm13, [r11 + 0d0h]
        movdqa  xmm14, [r11 + 0e0h]
        movdqa  xmm15, [r11 + 0f0h]
%endif

.done:
        mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
        popf                            ; restore interrupt flag
        xor     eax, eax                ; return 0
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        ; 64-bit leg of the hybrid-kernel path; edx still holds pCPUMCPU.
        and     edx, 0ffffffffh         ; zero-extend the 32-bit pointer (upper rdx is undefined here)
        o64 fxsave  [rdx + CPUMCPU.Host.fpu]
        o64 fxrstor [rdx + CPUMCPU.Guest.fpu]
        jmp far [.fpret wrt rip]        ; far return to the 32-bit code segment
.fpret: ; 16:32 pointer to .done in the 32-bit kernel CS.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveHostRestoreGuestFPUState
149
150
%ifndef RT_ARCH_AMD64
%ifdef VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state without loading any guest state.
;
; Only built for plain 32-bit hosts with 64-bit guest support (see the
; conditionals above); the other configurations use
; cpumR0SaveHostRestoreGuestFPUState instead.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4]     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostFPUState
        mov     xDX, dword [esp + 4]    ; xDX = pCPUMCPU
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Switch the state: mark the FPU as in use.
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        ; Clear CR0.TS and CR0.EM so the fxsave below cannot trap.
        mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                ;; @todo optimize this.

        fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)

        mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
        popf                            ; restore interrupt flag
        xor     eax, eax                ; return 0
        ret
ENDPROC cpumR0SaveHostFPUState
%endif
%endif
%endif
184
185
;;
; Saves the guest FPU/XMM state and restores the host state.
;
; No-op (beyond returning 0) unless CPUM_USED_FPU is set in fUseFlags,
; i.e. unless the guest FPU state is actually loaded; the flag is cleared
; again on the way out.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4]  GCC:rdi  MSC:rcx     CPUMCPU pointer
;
; Register use: xDX = pCPUMCPU, xCX = saved CR0, xAX = scratch.
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx                ; MSC x64: 1st arg in rcx.
 %else
        mov     xDX, rdi                ; SysV x64: 1st arg in rdi.
 %endif
%else
        mov     xDX, dword [esp + 4]    ; x86 cdecl: 1st arg on the stack.
%endif

        ; Only restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Clear CR0.TS and CR0.EM so touching the FPU state cannot trap.
        mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 32-bit ring-0 on a 64-bit kernel: far-jump into the 64-bit code
        ; segment so we can use REX-prefixed fxsave/fxrstor (full 64-bit state).
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; hand-assembled: jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef RT_ARCH_AMD64
        ; Use explicit REX prefix. See @bugref{6398}.
        o64 fxsave  [xDX + CPUMCPU.Guest.fpu]   ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
        o64 fxrstor [xDX + CPUMCPU.Host.fpu]
%else
        fxsave  [xDX + CPUMCPU.Guest.fpu]       ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
        fxrstor [xDX + CPUMCPU.Host.fpu]
%endif

.done:
        mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU     ; guest state no longer loaded
        popf                            ; restore interrupt flag
.fpu_not_used:
        xor     eax, eax                ; return 0
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        ; 64-bit leg of the hybrid-kernel path; edx still holds pCPUMCPU.
        and     edx, 0ffffffffh         ; zero-extend the 32-bit pointer
        o64 fxsave  [rdx + CPUMCPU.Guest.fpu]
        o64 fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]        ; far return to the 32-bit code segment
.fpret: ; 16:32 pointer to .done in the 32-bit kernel CS.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState
255
256
;;
; Restores the host's FPU/XMM state without saving the guest state first.
;
; Like cpumR0SaveGuestRestoreHostFPUState but skips the guest fxsave; only
; acts (and clears CPUM_USED_FPU) if the guest FPU state is currently loaded.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4]  GCC:rdi  MSC:rcx     CPUMCPU pointer
;
; Register use: xDX = pCPUMCPU, xCX = saved CR0, xAX = scratch.
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx                ; MSC x64: 1st arg in rcx.
 %else
        mov     xDX, rdi                ; SysV x64: 1st arg in rdi.
 %endif
%else
        mov     xDX, dword [esp + 4]    ; x86 cdecl: 1st arg on the stack.
%endif

        ; Restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Clear CR0.TS and CR0.EM so the fxrstor below cannot trap.
        mov     xAX, cr0
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 32-bit ring-0 on a 64-bit kernel: far-jump into the 64-bit code
        ; segment so we can use a REX-prefixed fxrstor (full 64-bit state).
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; hand-assembled: jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef RT_ARCH_AMD64
        o64 fxrstor [xDX + CPUMCPU.Host.fpu]    ; explicit REX prefix, see @bugref{6398}
%else
        fxrstor [xDX + CPUMCPU.Host.fpu]
%endif

.done:
        mov     cr0, xCX                ; and restore old CR0 again
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU     ; guest state no longer loaded
        popf                            ; restore interrupt flag
.fpu_not_used:
        xor     eax, eax                ; return 0
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        ; 64-bit leg of the hybrid-kernel path; edx still holds pCPUMCPU.
        and     edx, 0ffffffffh         ; zero-extend the 32-bit pointer
        o64 fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]        ; far return to the 32-bit code segment
.fpret: ; 16:32 pointer to .done in the 32-bit kernel CS.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState
322
323
324%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
; Reads DR0-DR3 into the four uint64_t slots at pa4Regs (hybrid 32-bit
; kernel builds only -- see the enclosing %ifdef). Interrupts are disabled
; around the DR accesses just to be on the safe side.
;
; Register use: xCX = pa4Regs, xAX/xDX = scratch (plus r8/r9 in 64-bit mode).
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi                ; SysV x64 passes the arg in rdi; MSC already uses rcx.
 %endif
%else
        mov     xCX, dword [esp + 4]    ; x86 cdecl: 1st arg on the stack.
%endif
        pushf                           ; Just to be on the safe side.
        cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; On a 64-bit kernel, switch to 64-bit mode so the full 64-bit DRx
        ; values are captured.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; hand-assembled: jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        ;
        ; Do the job.
        ; NOTE(review): on the 32-bit legacy path these stores write only the
        ; low 4 bytes of each 8-byte slot, leaving the high halves untouched --
        ; presumably the caller pre-zeros or ignores them; verify at call sites.
        ;
        mov     xAX, dr0
        mov     xDX, dr1
        mov     [xCX], xAX
        mov     [xCX + 8 * 1], xDX
        mov     xAX, dr2
        mov     xDX, dr3
        mov     [xCX + 8 * 2], xAX
        mov     [xCX + 8 * 3], xDX

.done:
        popf
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0  ; (redundant: the whole proc is already under this %ifdef)
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        ; 64-bit leg: ecx still holds pa4Regs.
        and     ecx, 0ffffffffh         ; zero-extend the 32-bit pointer

        mov     rax, dr0
        mov     rdx, dr1
        mov     r8,  dr2
        mov     r9,  dr3
        mov     [rcx], rax
        mov     [rcx + 8 * 1], rdx
        mov     [rcx + 8 * 2], r8
        mov     [rcx + 8 * 3], r9
        jmp far [.fpret wrt rip]        ; far return to the 32-bit code segment
.fpret: ; 16:32 pointer to .done in the 32-bit kernel CS.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx
383
384
;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
; Loads DR0-DR3 from the four uint64_t slots at pa4Regs (hybrid 32-bit
; kernel builds only -- see the enclosing %ifdef). Interrupts are disabled
; around the DR accesses just to be on the safe side.
;
; Register use: xCX = pa4Regs, xAX/xDX = scratch (plus r8/r9 in 64-bit mode).
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi                ; SysV x64 passes the arg in rdi; MSC already uses rcx.
 %endif
%else
        mov     xCX, dword [esp + 4]    ; x86 cdecl: 1st arg on the stack.
%endif
        pushf                           ; Just to be on the safe side.
        cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; On a 64-bit kernel, switch to 64-bit mode so the full 64-bit DRx
        ; values are loaded.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; hand-assembled: jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        ;
        ; Do the job (32-bit path: only the low dword of each slot is loaded).
        ;
        mov     xAX, [xCX]
        mov     xDX, [xCX + 8 * 1]
        mov     dr0, xAX
        mov     dr1, xDX
        mov     xAX, [xCX + 8 * 2]
        mov     xDX, [xCX + 8 * 3]
        mov     dr2, xAX
        mov     dr3, xDX

.done:
        popf
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0  ; (redundant: the whole proc is already under this %ifdef)
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        ; 64-bit leg: ecx still holds pa4Regs.
        and     ecx, 0ffffffffh         ; zero-extend the 32-bit pointer

        mov     rax, [rcx]
        mov     rdx, [rcx + 8 * 1]
        mov     r8,  [rcx + 8 * 2]
        mov     r9,  [rcx + 8 * 3]
        mov     dr0, rax
        mov     dr1, rdx
        mov     dr2, r8
        mov     dr3, r9
        jmp far [.fpret wrt rip]        ; far return to the 32-bit code segment
.fpret: ; 16:32 pointer to .done in the 32-bit kernel CS.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx
443
444%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
445
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette