VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 20538

Last change on this file since 20538 was 20538, checked in by vboxsync, 15 years ago

CPUMR0A.asm: Split out the currently unused code into CPUMR0UnusedA.asm (easier to handle now).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 11.2 KB
Line 
1; $Id: CPUMR0A.asm 20538 2009-06-13 21:15:27Z vboxsync $
2;; @file
3; CPUM - Guest Context Assembly Routines.
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/vm.mac"
27%include "VBox/err.mac"
28%include "VBox/stam.mac"
29%include "CPUMInternal.mac"
30%include "VBox/x86.mac"
31%include "VBox/cpum.mac"
32
33%ifdef IN_RING3
34 %error "The jump table doesn't link on leopard."
35%endif
36
37
38;*******************************************************************************
39;* External Symbols *
40;*******************************************************************************
41%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
42extern NAME(SUPR0AbsIs64bit)
43extern NAME(SUPR0Abs64bitKernelCS)
44extern NAME(SUPR0Abs64bitKernelSS)
45extern NAME(SUPR0Abs64bitKernelDS)
46extern NAME(SUPR0AbsKernelCS)
47%endif
48
49
50;*******************************************************************************
51;* Global Variables *
52;*******************************************************************************
53%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
54BEGINDATA
55;;
56; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
57; needing to clobber a register. (This trick doesn't quite work for PE btw.
58; but that's not relevant atm.)
59GLOBALNAME g_fCPUMIs64bitHost
60 dd NAME(SUPR0AbsIs64bit)
61%endif
62
63
64BEGINCODE
65
66
67;;
68; Saves the host FPU/XMM state and restores the guest state.
69;
70; @returns 0
71; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer
72;
73align 16
74BEGINPROC cpumR0SaveHostRestoreGuestFPUState
75%ifdef RT_ARCH_AMD64
76 %ifdef RT_OS_WINDOWS
77 mov xDX, rcx
78 %else
79 mov xDX, rdi
80 %endif
81%else
82 mov xDX, dword [esp + 4]
83%endif
84 pushf ; The darwin kernel can get upset or upset things if an
85 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
86
87 ; Switch the state.
88 or dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
89
90 mov xAX, cr0 ; Make sure its safe to access the FPU state.
91 mov xCX, xAX ; save old CR0
92 and xAX, ~(X86_CR0_TS | X86_CR0_EM)
93 mov cr0, xAX ;; @todo optimize this.
94
95%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
96 cmp byte [NAME(g_fCPUMIs64bitHost)], 0
97 jz .legacy_mode
98 db 0xea ; jmp far .sixtyfourbit_mode
99 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
100.legacy_mode:
101%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
102
103 fxsave [xDX + CPUMCPU.Host.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)
104 fxrstor [xDX + CPUMCPU.Guest.fpu]
105
106.done:
107 mov cr0, xCX ; and restore old CR0 again ;; @todo optimize this.
108 popf
109 xor eax, eax
110 ret
111
112%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
113ALIGNCODE(16)
114BITS 64
115.sixtyfourbit_mode:
116 and edx, 0ffffffffh
117 fxsave [rdx + CPUMCPU.Host.fpu]
118 fxrstor [rdx + CPUMCPU.Guest.fpu]
119 jmp far [.fpret wrt rip]
120.fpret: ; 16:32 Pointer to .the_end.
121 dd .done, NAME(SUPR0AbsKernelCS)
122BITS 32
123%endif
124ENDPROC cpumR0SaveHostRestoreGuestFPUState
125
%ifndef RT_ARCH_AMD64
%ifdef VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state.
;
; Only assembled for 32-bit hosts running 64-bit guests without the hybrid
; kernel, hence the plain [esp+4] argument fetch and no 64-bit mode detour.
; NOTE(review): unlike the sibling routines this one does not pushf/cli around
; the CR0 change and fxsave - presumably the caller guarantees interrupts are
; safe in this configuration; verify.
;
; @returns 0
; @param  pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx         CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostFPUState
        mov     xDX, dword [esp + 4]    ; pCPUMCPU

        ; Switch the state: mark the guest FPU state as loaded/used.
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        mov     xAX, cr0                ; Make sure its safe to access the FPU state.
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM) ; clear TS/EM so fxsave won't fault
        mov     cr0, xAX                ;; @todo optimize this.

        fxsave  [xDX + CPUMCPU.Host.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)

        mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
        xor     eax, eax                ; return 0
        ret
ENDPROC cpumR0SaveHostFPUState
%endif
%endif
%endif
156
;;
; Saves the guest FPU/XMM state and restores the host state.
;
; No-op (beyond returning 0) unless the guest has actually used the FPU since
; the last switch, as indicated by CPUM_USED_FPU.
;
; @returns 0
; @param  pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx         CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx                ; MSC x64: arg0 in rcx
 %else
        mov     xDX, rdi                ; SysV x64: arg0 in rdi
 %endif
%else
        mov     xDX, dword [esp + 4]    ; x86: arg0 on the stack
%endif

        ; Only restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        mov     xAX, cr0                ; Make sure it's safe to access the FPU state.
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM) ; clear TS/EM so fxsave/fxrstor won't fault
        mov     cr0, xAX                ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 32-bit hybrid kernel on a 64-bit host: far-jump into the 64-bit kernel
        ; code segment so the full 64-bit FPU/XMM state is handled.
        ; NOTE(review): the .sixtyfourbit_mode target below is only assembled when
        ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 is also defined - presumably that
        ; always holds when this path is built; verify against the build config.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxsave  [xDX + CPUMCPU.Guest.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)
        fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
        mov     cr0, xCX                ; and restore old CR0 again ;; @todo optimize this.
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU ; guest FPU state no longer loaded
        popf
.fpu_not_used:
        xor     eax, eax                ; return 0
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; zero-extend pCPUMCPU (32-bit kernel address)
        fxsave  [rdx + CPUMCPU.Guest.fpu]
        fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]        ; far jump back to 32-bit code at .done
.fpret:                                 ; 16:32 pointer to .done
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState
220
221
;;
; Sets the host's FPU/XMM state.
;
; Restores the saved host FPU state (without saving the guest state first) if
; and only if the guest has used the FPU, then clears CPUM_USED_FPU.
;
; @returns 0
; @param  pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx         CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx                ; MSC x64: arg0 in rcx
 %else
        mov     xDX, rdi                ; SysV x64: arg0 in rdi
 %endif
%else
        mov     xDX, dword [esp + 4]    ; x86: arg0 on the stack
%endif

        ; Restore FPU if guest has used it.
        ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        mov     xAX, cr0
        mov     xCX, xAX                ; save old CR0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM) ; clear TS/EM so fxrstor won't fault
        mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 32-bit hybrid kernel on a 64-bit host: far-jump into the 64-bit kernel
        ; code segment so the full 64-bit FPU/XMM state is restored.
        ; NOTE(review): the .sixtyfourbit_mode target below is only assembled when
        ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 is also defined - presumably that
        ; always holds when this path is built; verify against the build config.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
        mov     cr0, xCX                ; and restore old CR0 again
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU ; guest FPU state no longer loaded
        popf
.fpu_not_used:
        xor     eax, eax                ; return 0
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; zero-extend pCPUMCPU (32-bit kernel address)
        fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]        ; far jump back to 32-bit code at .done
.fpret:                                 ; 16:32 pointer to .done
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState
283
284
285%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
; Reads DR0-DR3 and stores them into the four-entry uint64_t array pa4Regs.
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi                ; SysV x64: arg0 in rdi (MSC already has it in rcx)
 %endif
%else
        mov     xCX, dword [esp + 4]    ; x86: arg0 on the stack
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 32-bit hybrid kernel on a 64-bit host: detour through the 64-bit code
        ; segment so all 64 bits of each debug register are captured.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

        ;
        ; Do the job.
        ; NOTE(review): on the 32-bit legacy path these stores write only the low
        ; dword of each 64-bit array slot, leaving the high dwords untouched -
        ; presumably the caller zero-initializes or tolerates this; verify.
        ;
        mov     xAX, dr0
        mov     xDX, dr1
        mov     [xCX], xAX
        mov     [xCX + 8 * 1], xDX
        mov     xAX, dr2
        mov     xDX, dr3
        mov     [xCX + 8 * 2], xAX
        mov     [xCX + 8 * 3], xDX

.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 ; (redundant: the whole proc is already inside this %ifdef)
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh         ; zero-extend pa4Regs (32-bit kernel address)

        mov     rax, dr0
        mov     rdx, dr1
        mov     r8, dr2
        mov     r9, dr3
        mov     [rcx], rax
        mov     [rcx + 8 * 1], rdx
        mov     [rcx + 8 * 2], r8
        mov     [rcx + 8 * 3], r9
        jmp far [.fpret wrt rip]        ; far jump back to 32-bit code at .done
.fpret:                                 ; 16:32 pointer to .done
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx
341
342
;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
; Loads DR0-DR3 from the four-entry uint64_t array pa4Regs.
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi                ; SysV x64: arg0 in rdi (MSC already has it in rcx)
 %endif
%else
        mov     xCX, dword [esp + 4]    ; x86: arg0 on the stack
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        ; 32-bit hybrid kernel on a 64-bit host: detour through the 64-bit code
        ; segment so all 64 bits of each debug register are loaded.
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

        ;
        ; Do the job.
        ; NOTE(review): on the 32-bit legacy path only the low dword of each
        ; 64-bit array slot reaches the debug registers - presumably fine for a
        ; 32-bit host; verify.
        ;
        mov     xAX, [xCX]
        mov     xDX, [xCX + 8 * 1]
        mov     dr0, xAX
        mov     dr1, xDX
        mov     xAX, [xCX + 8 * 2]
        mov     xDX, [xCX + 8 * 3]
        mov     dr2, xAX
        mov     dr3, xDX

.done:
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 ; (redundant: the whole proc is already inside this %ifdef)
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh         ; zero-extend pa4Regs (32-bit kernel address)

        mov     rax, [rcx]
        mov     rdx, [rcx + 8 * 1]
        mov     r8, [rcx + 8 * 2]
        mov     r9, [rcx + 8 * 3]
        mov     dr0, rax
        mov     dr1, rdx
        mov     dr2, r8
        mov     dr3, r9
        jmp far [.fpret wrt rip]        ; far jump back to 32-bit code at .done
.fpret:                                 ; 16:32 pointer to .done
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx
398
399%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
400
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette