VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 55048

Last change on this file since 55048 was 55048, checked in by vboxsync, 10 years ago

VMM,REM: Allocate the FPU/SSE/AVX/FUTURE state stuff. We need to use pointers to substates anyway and this will make CPUMCPU much smaller.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.6 KB
Line 
1; $Id: CPUMR0A.asm 55048 2015-03-31 18:49:19Z vboxsync $
2;; @file
3; CPUM - Ring-0 Assembly Routines (supporting HM and IEM).
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/vmm/vm.mac"
23%include "VBox/err.mac"
24%include "VBox/vmm/stam.mac"
25%include "CPUMInternal.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28
29%ifdef IN_RING3
30 %error "The jump table doesn't link on leopard."
31%endif
32
;*******************************************************************************
;* Defined Constants And Macros *
;*******************************************************************************
;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
; NOTE(review): offsets below follow the architectural FXSAVE image layout
; (FIP at 08h, FCS at 0ch, FDS at 14h, XMM0 at 0a0h) - see Intel SDM, FXSAVE.
%define XMM_OFF_IN_X86FXSTATE   160     ; First XMM register (XMM0) in the FXSAVE image.
%define IP_OFF_IN_X86FXSTATE    08h     ; FPU instruction pointer (FIP).
%define CS_OFF_IN_X86FXSTATE    0ch     ; FPU code selector (FCS); SAVE_32_OR_64_FPU tests this to detect a 32-bit style save.
%define DS_OFF_IN_X86FXSTATE    14h     ; FPU data operand selector (FDS).

43
44;*******************************************************************************
45;* External Symbols *
46;*******************************************************************************
47%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
48extern NAME(SUPR0AbsIs64bit)
49extern NAME(SUPR0Abs64bitKernelCS)
50extern NAME(SUPR0Abs64bitKernelSS)
51extern NAME(SUPR0Abs64bitKernelDS)
52extern NAME(SUPR0AbsKernelCS)
53%endif
54
55
56;*******************************************************************************
57;* Global Variables *
58;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
%if 0 ; Currently not used.
g_r32_Zero: dd 0.0
%endif

;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE btw.
; but that's not relevant atm.)
; The byte is non-zero when the host kernel is 64-bit (hybrid 32-bit kernel).
GLOBALNAME g_fCPUMIs64bitHost
        dd      NAME(SUPR0AbsIs64bit)
%endif
72
73
74BEGINCODE
75
%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param    xDX     Pointer to CPUMCPU.
; @uses     xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
%macro CLEANFPU 0
        ; Only necessary on CPUs flagged with the leaky FFXSR behaviour.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean

        xor     eax, eax
        fnstsw  ax                          ; FSW -> AX.
        test    eax, RT_BIT(7)              ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                            ; while clearing & loading the FPU bits in 'clean_fpu' below.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7                         ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs.
                                            ; for the upcoming push (load)
        fild    dword [g_r32_Zero xWrtRIP]  ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.
107
108
;; Macro for FXSAVE for the guest FPU but tries to figure out whether to
; save the 32-bit FPU state or 64-bit FPU state.
;
; On a 64-bit capable guest the FXSAVE may store a 64-bit FIP/FDP with FCS=0;
; in that case the CS/DS selectors are recovered via FNSTENV and the save is
; tagged as 32-bit style by writing a magic into the reserved dword (which
; RESTORE_32_OR_64_FPU checks).
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @uses     xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 2
        o64 fxsave [%2]                     ; 64-bit save first - keeps the full 64-bit FIP/FDP.

        ; Shouldn't be necessary to check if the entire 64-bit FIP is 0 (i.e. guest hasn't used its FPU yet) because it should
        ; be taken care of by the calling code, i.e. hmR0[Vmx|Svm]LoadSharedCR0() and hmR0[Vmx|Svm]ExitXcptNm() which ensure
        ; we swap the guest FPU state when it starts using it (#NM). In any case it's only a performance optimization.
        xor     edx, edx                    ; edx = 0 => 64-bit style save (default tag).
        cmp     dword [%2 + CS_OFF_IN_X86FXSTATE], 0
        jne     short %%save_done

        sub     rsp, 20h                    ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
        fnstenv [rsp]
        movzx   eax, word [rsp + 10h]       ; FCS from the 32-bit protected-mode FNSTENV image (see Intel SDM).
        mov     [%2 + CS_OFF_IN_X86FXSTATE], eax
        movzx   eax, word [rsp + 18h]       ; FDS.
        add     rsp, 20h
        mov     [%2 + DS_OFF_IN_X86FXSTATE], eax
        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC   ; Tag as 32-bit style save.

%%save_done:
        mov     dword [%2 + X86_OFF_FXSTATE_RSVD], edx
%endmacro
135
;;
; Wrapper for selecting 32-bit or 64-bit FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; SAVE_32_OR_64_FPU left a magic value in the reserved dword of the FXSAVE
; image when it made a 32-bit style save; anything else means a 64-bit save.
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @uses     xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 2
        cmp     dword [%2 + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
        je      short %%restore_32bit_fpu
        o64 fxrstor [%2]                    ; 64-bit style save - use the REX.W form.
        jmp     short %%restore_fpu_done
%%restore_32bit_fpu:
        fxrstor [%2]                        ; 32-bit style save.
%%restore_fpu_done:
%endmacro
152
153
;;
; Clears CR0.TS and CR0.EM if necessary, saving the previous result.
;
; This is used to avoid FPU exceptions (#NM) when touching the FPU state.
;
; @param    %1      Register to save the old CR0 in (pass to RESTORE_CR0).
;                   Left as zero when CR0 was not modified.
; @param    %2      Temporary scratch register.
; @uses     EFLAGS, CR0
;
%macro SAVE_CR0_CLEAR_FPU_TRAPS 2
        xor     %1, %1                      ; %1 == 0 tells RESTORE_CR0 that CR0 was left untouched.
        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure its safe to access the FPU state.
        jz      %%skip_cr0_write
        mov     %1, %2                      ; Save old CR0
        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, %2                     ; Clear TS & EM so fxsave/fxrstor below won't trap.
%%skip_cr0_write:
%endmacro
173
;;
; Restore CR0.TS and CR0.EM state if SAVE_CR0_CLEAR_FPU_TRAPS changed it.
;
; @param    %1      The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in.
;
%macro RESTORE_CR0 1
        test    %1, %1                      ; Zero means SAVE_CR0_CLEAR_FPU_TRAPS left CR0 alone.
        jz      %%no_cr0_restore
        mov     cr0, %1
%%no_cr0_restore:
%endmacro
185
186
;;
; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state.
;
; @returns  0
; @param    pCpumCpu  x86:[esp+4] gcc:rdi msc:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebx
        push    esi
        mov     ebx, dword [esp + 4]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif

        pushf                               ; The darwin kernel can get upset or upset things if an
        cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX is now old CR0 value, don't use!

        ;
        ; Switch state.
        ;
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif

%ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]                ; Use explicit REX prefix. See @bugref{6398}.

        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_load_32_or_64
        fxrstor [pXState]
        jmp     short .fpu_load_done
.fpu_load_32_or_64:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState
.fpu_load_done:
%else
        fxsave  [pXState]
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
        fxrstor [pXState]
%endif

%ifdef VBOX_WITH_KERNEL_USING_XMM
        ; Restore the non-volatile xmm registers. ASSUMING 64-bit host.
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        movdqa  xmm6,  [pXState + XMM_OFF_IN_X86FXSTATE + 060h]
        movdqa  xmm7,  [pXState + XMM_OFF_IN_X86FXSTATE + 070h]
        movdqa  xmm8,  [pXState + XMM_OFF_IN_X86FXSTATE + 080h]
        movdqa  xmm9,  [pXState + XMM_OFF_IN_X86FXSTATE + 090h]
        movdqa  xmm10, [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h]
        movdqa  xmm11, [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h]
        movdqa  xmm12, [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h]
        movdqa  xmm13, [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h]
        movdqa  xmm14, [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h]
        movdqa  xmm15, [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h]
%endif

.done:
        RESTORE_CR0 xCX
        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
        popf

%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
%endif
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        o64 fxsave [pXState]

        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        ; Load the guest state pointer BEFORE the mode test: previously it was only
        ; loaded on the fxrstor path, so RESTORE_32_OR_64_FPU ran against the host
        ; state buffer on long-mode capable guests.
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_load_32_or_64_darwin
        fxrstor [pXState]
        jmp     short .fpu_load_done_darwin
.fpu_load_32_or_64_darwin:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState
.fpu_load_done_darwin:

        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
%undef pCpumCpu
%undef pXState
ENDPROC   cpumR0SaveHostRestoreGuestFPUState
299
300
301%ifndef RT_ARCH_AMD64
302%ifdef VBOX_WITH_64_BITS_GUESTS
303%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/SSE/AVX state.
;
; @returns  VINF_SUCCESS (0) in EAX
; @param    pCpumCpu  x86:[esp+4] gcc:rdi msc:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebx
        push    esi
        mov     ebx, dword [esp + 4]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif

        pushf                               ; The darwin kernel can get upset or upset things if an
        cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX is now old CR0 value, don't use!

        ;
        ; Save the host state.
        ;
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif

%ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]                ; Fixed case typo (pXstate); %defines are case sensitive.
%else
        fxsave  [pXState]
%endif

.done:
        RESTORE_CR0 xCX
        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
        popf

%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
%endif
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        ; Save the HOST state with the full 64-bit FIP/FDP (the old comment said
        ; "guest" - this proc only touches Host.pXStateR0).
        o64 fxsave [pXState]                ; Fixed case typo (pXstate).
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
%undef pCpumCpu
%undef pXState
ENDPROC   cpumR0SaveHostFPUState
380%endif
381%endif
382%endif
383
384
;;
; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
;
; @returns  VINF_SUCCESS (0) in eax.
; @param    pCpumCpu  x86:[esp+4] gcc:rdi msc:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebx
        push    esi
        mov     ebx, dword [esp + 4]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif

        ;
        ; Only restore FPU if guest has used it.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      .fpu_not_used

        pushf                               ; The darwin kernel can get upset or upset things if an
        cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX is now old CR0 value, don't use!

        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif

%ifdef RT_ARCH_AMD64
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_save_32_or_64
        fxsave  [pXState]
        jmp     short .fpu_save_done
.fpu_save_32_or_64:
        SAVE_32_OR_64_FPU pCpumCpu, pXState
.fpu_save_done:

        ; Restore the HOST state (was Guest.pXStateR0, which re-loaded the state
        ; just saved above and threw the host state away).
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        o64 fxrstor [pXState]               ; Use explicit REX prefix. See @bugref{6398}.
%else
        fxsave  [pXState]                   ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]    ; Host, not Guest - see note above.
        fxrstor [pXState]
%endif

.done:
        RESTORE_CR0 xCX
        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf

.fpu_not_used:
%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
%endif
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_save_32_or_64_darwin
        fxsave  [pXState]
        jmp     short .fpu_save_done_darwin
.fpu_save_32_or_64_darwin:
        SAVE_32_OR_64_FPU pCpumCpu, pXState
.fpu_save_done_darwin:

        ; Restore the HOST state (was Guest.pXStateR0 with a pXstate case typo).
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        o64 fxrstor [pXState]
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
%undef pCpumCpu
%undef pXState
ENDPROC   cpumR0SaveGuestRestoreHostFPUState
486
487
;;
; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
;
; @returns  0
; @param    pCpumCpu  x86:[esp+4] gcc:rdi msc:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebx
        push    esi
        mov     ebx, dword [esp + 4]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif

        ;
        ; Restore FPU if guest has used it.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                               ; The darwin kernel can get upset or upset things if an
        cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX   ; xCX is now old CR0 value, don't use!

        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif

%ifdef RT_ARCH_AMD64
        o64 fxrstor [pXState]
%else
        fxrstor [pXState]
%endif

.done:
        RESTORE_CR0 xCX
        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf

.fpu_not_used:
%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
%endif
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        o64 fxrstor [pXState]
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
%undef pCpumCpu                             ; Fixed case typo (pCpumCPu) that left the %define active past ENDPROC.
%undef pXState
ENDPROC   cpumR0RestoreHostFPUState
567
568
569%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
; Reads DR0..DR3 into the caller-supplied array of four 64-bit values.
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi                    ; Normalize: xCX = pa4Regs on both the MSC and GCC 64-bit ABIs.
 %endif
%else
        mov     xCX, dword [esp + 4]        ; xCX = pa4Regs.
%endif
        pushf                               ; Just to be on the safe side.
        cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        ;
        ; Do the job.
        ;
        mov     xAX, dr0
        mov     xDX, dr1
        mov     [xCX], xAX
        mov     [xCX + 8 * 1], xDX
        mov     xAX, dr2
        mov     xDX, dr3
        mov     [xCX + 8 * 2], xAX
        mov     [xCX + 8 * 3], xDX

.done:
        popf
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh             ; The 32-bit caller only set ecx; make sure the upper half of rcx is zero.

        mov     rax, dr0
        mov     rdx, dr1
        mov     r8, dr2
        mov     r9, dr3
        mov     [rcx], rax
        mov     [rcx + 8 * 1], rdx
        mov     [rcx + 8 * 2], r8
        mov     [rcx + 8 * 3], r9
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0SaveDRx
628
629
;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
; Loads DR0..DR3 from the caller-supplied array of four 64-bit values.
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi                    ; Normalize: xCX = pa4Regs on both the MSC and GCC 64-bit ABIs.
 %endif
%else
        mov     xCX, dword [esp + 4]        ; xCX = pa4Regs.
%endif
        pushf                               ; Just to be on the safe side.
        cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                        ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        ;
        ; Do the job.
        ;
        mov     xAX, [xCX]
        mov     xDX, [xCX + 8 * 1]
        mov     dr0, xAX
        mov     dr1, xDX
        mov     xAX, [xCX + 8 * 2]
        mov     xDX, [xCX + 8 * 3]
        mov     dr2, xAX
        mov     dr3, xDX

.done:
        popf
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh             ; The 32-bit caller only set ecx; make sure the upper half of rcx is zero.

        mov     rax, [rcx]
        mov     rdx, [rcx + 8 * 1]
        mov     r8, [rcx + 8 * 2]
        mov     r9, [rcx + 8 * 3]
        mov     dr0, rax
        mov     dr1, rdx
        mov     dr2, r8
        mov     dr3, r9
        jmp far [.fpret wrt rip]
.fpret:                                     ; 16:32 Pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0LoadDRx
688
689%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
690
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette