VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@55059

Last change on this file since 55059 was 55059, checked in by vboxsync, 10 years ago

CPUMR[0C]A.asm: Fixed 32-bit breakage. Forgot to update the pCpumCpu parameter location.

; $Id: CPUMR0A.asm 55059 2015-03-31 23:05:42Z vboxsync $
;; @file
; CPUM - Ring-0 Assembly Routines (supporting HM and IEM).
;

;
; Copyright (C) 2006-2015 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "VBox/vmm/stam.mac"
%include "CPUMInternal.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif

;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************
;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE   160
%define IP_OFF_IN_X86FXSTATE    08h
%define CS_OFF_IN_X86FXSTATE    0ch
%define DS_OFF_IN_X86FXSTATE    14h
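
;; For reference (per the Intel SDM's FXSAVE image layout, which these
; offsets index): 00h FCW, 02h FSW, 04h FTW, 06h FOP, 08h FIP (with FCS at
; 0ch in the 32-bit image format), 10h FDP (with FDS at 14h in the 32-bit
; image format), 18h MXCSR, 20h ST0-ST7, and the XMM registers at 0a0h (160).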


;*******************************************************************************
;*  External Symbols                                                           *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif


;*******************************************************************************
;*  Global Variables                                                           *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
%if 0 ; Currently not used.
g_r32_Zero:     dd 0.0
%endif

;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE, btw,
; but that's not relevant atm.)
GLOBALNAME g_fCPUMIs64bitHost
        dd NAME(SUPR0AbsIs64bit)
%endif


BEGINCODE

%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param    xDX     Pointer to CPUMCPU.
; @uses     xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
%macro CLEANFPU 0
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean

        xor     eax, eax
        fnstsw  ax                      ; FSW -> AX.
        test    eax, RT_BIT(7)          ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                        ; while clearing & loading the FPU bits in 'clean_fpu' below.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7                     ; Clear FPU stack register(7)'s tag entry to prevent overflow
                                        ; if a wraparound occurs for the upcoming push (load).
        fild    dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.


;; Macro that performs FXSAVE for the guest FPU, figuring out whether to save
; the 32-bit or the 64-bit FPU state.
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @uses     xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 2
        o64 fxsave [%2]

        xor     edx, edx
        cmp     dword [%2 + CS_OFF_IN_X86FXSTATE], 0
        jne     short %%save_done

        sub     rsp, 20h                ; Only need 1ch bytes, but keep the stack aligned or we'll #GP(0).
        fnstenv [rsp]
        movzx   eax, word [rsp + 10h]
        mov     [%2 + CS_OFF_IN_X86FXSTATE], eax
        movzx   eax, word [rsp + 18h]
        add     rsp, 20h
        mov     [%2 + DS_OFF_IN_X86FXSTATE], eax
        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC

%%save_done:
        mov     dword [%2 + X86_OFF_FXSTATE_RSVD], edx
%endmacro
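
;; A note on the CS test above: with o64 fxsave the CPU stores the 64-bit
; image format, where the dword at CS_OFF_IN_X86FXSTATE is the upper half of
; the 64-bit FIP rather than FCS. Non-zero presumably means genuinely 64-bit
; state, which is kept as-is (edx stays 0). Zero means the state also fits
; the 32-bit format, so FCS/FDS are fetched via fnstenv (word offsets 10h and
; 18h of its 32-bit protected-mode image) and the reserved dword is tagged
; with X86_FXSTATE_RSVD_32BIT_MAGIC for RESTORE_32_OR_64_FPU to key off.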

;;
; Wrapper for selecting 32-bit or 64-bit FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @uses     xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 2
        cmp     dword [%2 + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
        jne     short %%restore_64bit_fpu
        fxrstor [%2]
        jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
        o64 fxrstor [%2]
%%restore_fpu_done:
%endmacro
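
;; A minimal pairing sketch (not part of this file's control flow; pCpumCpu
; and pXState as set up by the functions below):
;       SAVE_32_OR_64_FPU    pCpumCpu, pXState    ; saves + tags the image format
;       ...
;       RESTORE_32_OR_64_FPU pCpumCpu, pXState    ; dispatches on the tag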


;;
; Clears CR0.TS and CR0.EM if necessary, saving the previous result.
;
; This is used to avoid FPU exceptions when touching the FPU state.
;
; @param    %1      Register to save the old CR0 in (pass to RESTORE_CR0).
; @param    %2      Temporary scratch register.
; @uses     EFLAGS, CR0
;
%macro SAVE_CR0_CLEAR_FPU_TRAPS 2
        xor     %1, %1
        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
        jz      %%skip_cr0_write
        mov     %1, %2                  ; Save the old CR0.
        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, %2
%%skip_cr0_write:
%endmacro

;;
; Restores the CR0.TS and CR0.EM state if SAVE_CR0_CLEAR_FPU_TRAPS changed it.
;
; @param    %1      The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in.
;
%macro RESTORE_CR0 1
        cmp     %1, 0
        je      %%skip_cr0_restore
        mov     cr0, %1
%%skip_cr0_restore:
%endmacro
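
;; The pattern used around every fxsave/fxrstor in this file (interrupts are
; disabled first, since an interrupt could otherwise fiddle with CR0 or the
; FPU state in between):
;       pushf
;       cli
;       SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX = old CR0, or 0 if unchanged
;       ... fxsave / fxrstor ...
;       RESTORE_CR0 xCX
;       popf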


;;
; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state.
;
; @returns  0
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
;
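; Presumed C-side prototype, mirroring the DECLASM comments used for the DRx
; routines near the end of this file (an assumption, not from this file):
;       DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCpumCpu);
;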
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebp
        mov     ebp, esp
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now the old CR0 value, don't use!

        ;
        ; Switch state.
        ;
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif

%ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]            ; Use explicit REX prefix. See @bugref{6398}.

        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_load_32_or_64
        fxrstor [pXState]
        jmp     short .fpu_load_done
.fpu_load_32_or_64:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState
.fpu_load_done:
%else
        fxsave  [pXState]
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
        fxrstor [pXState]
%endif

%ifdef VBOX_WITH_KERNEL_USING_XMM
        ; Restore the non-volatile xmm registers. ASSUMING 64-bit host.
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        movdqa  xmm6,  [pXState + XMM_OFF_IN_X86FXSTATE + 060h]
        movdqa  xmm7,  [pXState + XMM_OFF_IN_X86FXSTATE + 070h]
        movdqa  xmm8,  [pXState + XMM_OFF_IN_X86FXSTATE + 080h]
        movdqa  xmm9,  [pXState + XMM_OFF_IN_X86FXSTATE + 090h]
        movdqa  xmm10, [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h]
        movdqa  xmm11, [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h]
        movdqa  xmm12, [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h]
        movdqa  xmm13, [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h]
        movdqa  xmm14, [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h]
        movdqa  xmm15, [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h]
%endif

.done:
        RESTORE_CR0 xCX
        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
        popf

%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
        leave
%endif
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        o64 fxsave [pXState]

        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        ; (The guest pointer is loaded before the branch so both paths restore from the guest image.)
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_load_32_or_64_darwin
        fxrstor [pXState]
        jmp     short .fpu_load_done_darwin
.fpu_load_32_or_64_darwin:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState
.fpu_load_done_darwin:

        jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveHostRestoreGuestFPUState


%ifndef RT_ARCH_AMD64
%ifdef VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/SSE/AVX state.
;
; @returns  VINF_SUCCESS (0) in EAX
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
;
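; Presumed C-side prototype (an assumption, not from this file):
;       DECLASM(int) cpumR0SaveHostFPUState(PCPUMCPU pCpumCpu);
;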
align 16
BEGINPROC cpumR0SaveHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebp
        mov     ebp, esp
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now the old CR0 value, don't use!

        ;
        ; Save the host state.
        ;
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif

%ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]
%else
        fxsave  [pXState]
%endif

.done:
        RESTORE_CR0 xCX
        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
        popf

%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
        leave
%endif
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        ; Save the host FPU state (64-bit image). See @bugref{7138}.
        o64 fxsave [pXState]
        jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
%undef pCpumCpu
%undef pXState
ENDPROC cpumR0SaveHostFPUState
%endif ; !VBOX_WITH_HYBRID_32BIT_KERNEL
%endif ; VBOX_WITH_64_BITS_GUESTS
%endif ; !RT_ARCH_AMD64


;;
; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
;
; @returns  VINF_SUCCESS (0) in eax.
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
;
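; Presumed C-side prototype (an assumption, not from this file):
;       DECLASM(int) cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCpumCpu);
;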
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebp
        mov     ebp, esp
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif

        ;
        ; Only restore FPU if guest has used it.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now the old CR0 value, don't use!

        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif

%ifdef RT_ARCH_AMD64
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_save_32_or_64
        fxsave  [pXState]
        jmp     short .fpu_save_done
.fpu_save_32_or_64:
        SAVE_32_OR_64_FPU pCpumCpu, pXState
.fpu_save_done:

        ; Restore the host state.
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        o64 fxrstor [pXState]           ; Use explicit REX prefix. See @bugref{6398}.
%else
        fxsave  [pXState]               ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        fxrstor [pXState]
%endif

.done:
        RESTORE_CR0 xCX
        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf

.fpu_not_used:
%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
        leave
%endif
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_save_32_or_64_darwin
        fxsave  [pXState]
        jmp     short .fpu_save_done_darwin
.fpu_save_32_or_64_darwin:
        SAVE_32_OR_64_FPU pCpumCpu, pXState
.fpu_save_done_darwin:

        ; Restore the host state.
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        o64 fxrstor [pXState]
        jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
%undef pCpumCpu
%undef pXState
ENDPROC cpumR0SaveGuestRestoreHostFPUState


;;
; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
;
; @returns  0
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
;
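; Presumed C-side prototype (an assumption, not from this file):
;       DECLASM(int) cpumR0RestoreHostFPUState(PCPUMCPU pCpumCpu);
;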
align 16
BEGINPROC cpumR0RestoreHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebp
        mov     ebp, esp
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif

        ;
        ; Restore FPU if guest has used it.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now the old CR0 value, don't use!

        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif

%ifdef RT_ARCH_AMD64
        o64 fxrstor [pXState]
%else
        fxrstor [pXState]
%endif

.done:
        RESTORE_CR0 xCX
        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf

.fpu_not_used:
%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
        leave
%endif
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        o64 fxrstor [pXState]
        jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
%undef pCpumCpu
%undef pXState
ENDPROC cpumR0RestoreHostFPUState


%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
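; pa4Regs receives DR0..DR3 as four consecutive qwords (byte offsets 0, 8, 16 and 24).
;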
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi
 %endif
%else
        mov     xCX, dword [esp + 4]
%endif
        pushf                           ; Just to be on the safe side.
        cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        ;
        ; Do the job.
        ;
        mov     xAX, dr0
        mov     xDX, dr1
        mov     [xCX],         xAX
        mov     [xCX + 8 * 1], xDX
        mov     xAX, dr2
        mov     xDX, dr3
        mov     [xCX + 8 * 2], xAX
        mov     [xCX + 8 * 3], xDX

.done:
        popf
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh

        mov     rax, dr0
        mov     rdx, dr1
        mov     r8,  dr2
        mov     r9,  dr3
        mov     [rcx],         rax
        mov     [rcx + 8 * 1], rdx
        mov     [rcx + 8 * 2], r8
        mov     [rcx + 8 * 3], r9
        jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx


;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
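; pa4Regs supplies DR0..DR3 as four consecutive qwords (byte offsets 0, 8, 16 and 24).
;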
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi
 %endif
%else
        mov     xCX, dword [esp + 4]
%endif
        pushf                           ; Just to be on the safe side.
        cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        ;
        ; Do the job.
        ;
        mov     xAX, [xCX]
        mov     xDX, [xCX + 8 * 1]
        mov     dr0, xAX
        mov     dr1, xDX
        mov     xAX, [xCX + 8 * 2]
        mov     xDX, [xCX + 8 * 3]
        mov     dr2, xAX
        mov     dr3, xDX

.done:
        popf
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh

        mov     rax, [rcx]
        mov     rdx, [rcx + 8 * 1]
        mov     r8,  [rcx + 8 * 2]
        mov     r9,  [rcx + 8 * 3]
        mov     dr0, rax
        mov     dr1, rdx
        mov     dr2, r8
        mov     dr3, r9
        jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0