VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 52419

Last change on this file since 52419 was 52419, checked in by vboxsync, 10 years ago

VMM: Fix restoring 32-bit guest FPU state on 64-bit capable VMs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.7 KB
Line 
1; $Id: CPUMR0A.asm 52419 2014-08-19 16:12:46Z vboxsync $
2;; @file
3; CPUM - Guest Context Assembly Routines.
4;
5
6;
7; Copyright (C) 2006-2013 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/vmm/vm.mac"
23%include "VBox/err.mac"
24%include "VBox/vmm/stam.mac"
25%include "CPUMInternal.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28
29%ifdef IN_RING3
30 %error "The jump table doesn't link on leopard."
31%endif
32
33;*******************************************************************************
34;* Defined Constants And Macros *
35;*******************************************************************************
36;; The offset of the XMM registers in X86FXSTATE.
37; Use define because I'm too lazy to convert the struct.
38%define XMM_OFF_IN_X86FXSTATE 160
39%define RSVD_OFF_IN_X86FXSTATE 2ch ; Reserved upper 32-bit part of ST(0)/MM0.
40%define IP_OFF_IN_X86FXSTATE 08h
41%define CS_OFF_IN_X86FXSTATE 0ch
42%define DS_OFF_IN_X86FXSTATE 14h
43
44; Must fit into the dword (32-bits) at RSVD_OFF_IN_X86FXSTATE.
45%define FPUSTATE_32BIT_MAGIC 032b3232bh
46
47
48;*******************************************************************************
49;* External Symbols *
50;*******************************************************************************
51%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
52extern NAME(SUPR0AbsIs64bit)
53extern NAME(SUPR0Abs64bitKernelCS)
54extern NAME(SUPR0Abs64bitKernelSS)
55extern NAME(SUPR0Abs64bitKernelDS)
56extern NAME(SUPR0AbsKernelCS)
57%endif
58
59
60;*******************************************************************************
61;* Global Variables *
62;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Mirror of the SUPR0AbsIs64bit absolute symbol, kept in our own data segment
; so the is-64-bit-host check can be a single memory cmp without clobbering a
; scratch register.  (This trick doesn't quite work for PE, but that's not
; relevant at the moment.)
GLOBALNAME g_fCPUMIs64bitHost
        dd      NAME(SUPR0AbsIs64bit)
%endif
72
73
74BEGINCODE
75
;;
; Cleans the FPU state, if necessary, before restoring the FPU on hosts with
; the leaky FXSAVE/FXRSTOR behaviour on AMD CPUs; see cpumR3CheckLeakyFpu().
;
; This macro ASSUMES CR0.TS is not set!
; @remarks Trashes xAX!!
; Changes here should also be reflected in CPUMRCA.asm's copy!
%macro CLEANFPU 0
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean       ; Not a leaky CPU, skip the workaround.

        xor     eax, eax
        fnstsw  ax                      ; eax = FSW.
        test    eax, RT_BIT(7)          ; FSW.ES (bit 7) set?  Clear pending exceptions first
                                        ; so the load in 'clean_fpu' can't fault.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7                     ; Free ST(7)'s tag so the push (load) below can't
                                        ; overflow the register stack on wraparound.
        fild    dword [xDX + CPUMCPU.Guest.fpu] ; Explicit load to overwrite FIP, FOP and FDP in the FPU.

.nothing_to_clean:
%endmacro
100
101
;;
; FXSAVE of the guest FPU state, working out whether the 32-bit or the 64-bit
; image has to be written.
;
; @remarks Requires CPUMCPU pointer in RDX
%macro SAVE_32_OR_64_FPU 0
        o64 fxsave [rdx + CPUMCPU.Guest.fpu]

        ; Checking whether the whole 64-bit FIP is zero (guest never touched the FPU)
        ; shouldn't be needed here: the callers, hmR0[Vmx|Svm]LoadSharedCR0() and
        ; hmR0[Vmx|Svm]ExitXcptNm(), swap in the guest FPU state on first use (#NM).
        ; It would only be a performance optimization anyway.
        ; cmp qword [rdx + CPUMCPU.Guest.fpu + IP_OFF_IN_X86FXSTATE], 0
        ; je short %%save_done

        ; A non-zero FPUCS in the fxsave image means a 64-bit image was written; done.
        cmp     dword [rdx + CPUMCPU.Guest.fpu + CS_OFF_IN_X86FXSTATE], 0
        jne     short %%save_done

        ; Otherwise fetch FPUCS/FPUDS via fnstenv and patch them into the image,
        ; then tag the image as 32-bit with our magic value.
        sub     rsp, 20h                ; Only need 1ch bytes, but keep the stack aligned or we #GP(0).
        fnstenv [rsp]
        movzx   eax, word [rsp + 10h]   ; FPUCS from the environment image — TODO confirm layout offsets.
        mov     [rdx + CPUMCPU.Guest.fpu + CS_OFF_IN_X86FXSTATE], eax
        movzx   eax, word [rsp + 18h]   ; FPUDS from the environment image.
        mov     [rdx + CPUMCPU.Guest.fpu + DS_OFF_IN_X86FXSTATE], eax
        add     rsp, 20h
        mov     dword [rdx + CPUMCPU.Guest.fpu + RSVD_OFF_IN_X86FXSTATE], FPUSTATE_32BIT_MAGIC
%%save_done:
%endmacro
127
;;
; FXRSTOR of the guest FPU state, picking the 32-bit or 64-bit variant to
; match what SAVE_32_OR_64_FPU() stored earlier.
;
; @remarks Requires CPUMCPU pointer in RDX
%macro RESTORE_32_OR_64_FPU 0
        cmp     dword [rdx + CPUMCPU.Guest.fpu + RSVD_OFF_IN_X86FXSTATE], FPUSTATE_32BIT_MAGIC
        jne     short %%restore_64bit_fpu
        ; 32-bit image: wipe (part of) our magic from the reserved field before the
        ; load — probably unnecessary, but safer given our limited testing — and put
        ; it back again afterwards.
        mov     word [rdx + CPUMCPU.Guest.fpu + RSVD_OFF_IN_X86FXSTATE], 0
        fxrstor [rdx + CPUMCPU.Guest.fpu]
        mov     dword [rdx + CPUMCPU.Guest.fpu + RSVD_OFF_IN_X86FXSTATE], FPUSTATE_32BIT_MAGIC
        jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
        o64 fxrstor [rdx + CPUMCPU.Guest.fpu]
%%restore_fpu_done:
%endmacro
144
145
;;
; Saves CR0 and, if necessary, clears the bits that would make touching the
; FPU state raise exceptions.
;
; @remarks Uses xCX for backing-up CR0 (if CR0 needs to be modified) otherwise clears xCX.
; @remarks Trashes xAX.
%macro SAVE_CR0_CLEAR_FPU_TRAPS 0
        xor     ecx, ecx                ; xCX == 0 signals "nothing to restore".
        mov     xAX, cr0
        test    eax, X86_CR0_TS | X86_CR0_EM ; Is it already safe to access the FPU state?
        jz      %%skip_cr0_write
        mov     xCX, xAX                ; Back up the original CR0 value.
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX
%%skip_cr0_write:
%endmacro
161
;;
; Restores CR0 from xCX if SAVE_CR0_CLEAR_FPU_TRAPS had to modify it.
;
; @remarks xCX should contain the CR0 value to restore, or 0 if no restoration is needed.
%macro RESTORE_CR0 0
        cmp     ecx, 0
        je      %%skip_cr0_restore      ; Zero means CR0 was never touched.
        mov     cr0, xCX
%%skip_cr0_restore:
%endmacro
171
172
;;
; Saves the host FPU/XMM state and restores the guest state.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
        ; Fetch the CPUMCPU pointer into xDX from the ABI-specific argument location.
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Switch the state: mark the guest FPU as in use.
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        ; Clear CR0 FPU bits so we don't trap; backs up CR0 in xCX.
        SAVE_CR0_CLEAR_FPU_TRAPS
        ; Do NOT use xCX from this point!

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef RT_ARCH_AMD64
        ; Use explicit REX prefix. See @bugref{6398}.
        o64 fxsave [rdx + CPUMCPU.Host.fpu] ; ASSUMES all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption).

        ; Restore the guest FPU (32-bit or 64-bit), preserving existing broken state. See @bugref{7138}.
        test    dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_load_32_or_64
        fxrstor [rdx + CPUMCPU.Guest.fpu]
        jmp     short .fpu_load_done
.fpu_load_32_or_64:
        RESTORE_32_OR_64_FPU
.fpu_load_done:
%else
        fxsave  [edx + CPUMCPU.Host.fpu] ; ASSUMES all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption).
        fxrstor [edx + CPUMCPU.Guest.fpu]
%endif

%ifdef VBOX_WITH_KERNEL_USING_XMM
        ; Reload the non-volatile xmm registers from the saved host image.
        ; ASSUMING 64-bit windows (xmm6-xmm15 are callee-saved there).
        lea     r11, [xDX + CPUMCPU.Host.fpu + XMM_OFF_IN_X86FXSTATE]
        movdqa  xmm6,  [r11 + 060h]
        movdqa  xmm7,  [r11 + 070h]
        movdqa  xmm8,  [r11 + 080h]
        movdqa  xmm9,  [r11 + 090h]
        movdqa  xmm10, [r11 + 0a0h]
        movdqa  xmm11, [r11 + 0b0h]
        movdqa  xmm12, [r11 + 0c0h]
        movdqa  xmm13, [r11 + 0d0h]
        movdqa  xmm14, [r11 + 0e0h]
        movdqa  xmm15, [r11 + 0f0h]
%endif

.done:
        RESTORE_CR0                     ; Restore CR0 from xCX if it was changed above.
        popf
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; Zero-extend the 32-bit CPUMCPU pointer.
        o64 fxsave [rdx + CPUMCPU.Host.fpu]

        ; Restore the guest FPU (32-bit or 64-bit), preserving existing broken state. See @bugref{7138}.
        test    dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_load_32_or_64_darwin
        fxrstor [rdx + CPUMCPU.Guest.fpu]
        jmp     short .fpu_load_done_darwin
.fpu_load_32_or_64_darwin:
        RESTORE_32_OR_64_FPU
.fpu_load_done_darwin:

        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveHostRestoreGuestFPUState
269
270
%ifndef RT_ARCH_AMD64
%ifdef VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostFPUState
        mov     xDX, dword [esp + 4]    ; Only built for 32-bit hosts: stack-passed argument.
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Switch the state: mark the guest FPU as in use.
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        ; Clear CR0 FPU bits so we don't trap; backs up CR0 in xCX.
        SAVE_CR0_CLEAR_FPU_TRAPS
        ; Do NOT use xCX from this point!

        fxsave  [xDX + CPUMCPU.Host.fpu] ; ASSUMES all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption).

        ; Restore CR0 from xCX if it was changed above.
        RESTORE_CR0

        popf
        xor     eax, eax
        ret
ENDPROC cpumR0SaveHostFPUState
%endif
%endif
%endif
305
306
;;
; Saves the guest FPU/XMM state and restores the host state.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
        ; Fetch the CPUMCPU pointer into xDX from the ABI-specific argument location.
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif

        ; Nothing to do unless the guest actually used the FPU.  Using fxrstor
        ; below should then not cause unwanted exceptions on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Clear CR0 FPU bits so we don't trap; backs up CR0 in xCX.
        SAVE_CR0_CLEAR_FPU_TRAPS
        ; Do NOT use xCX from this point!

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef RT_ARCH_AMD64
        ; Save the guest FPU (32-bit or 64-bit), preserving existing broken state. See @bugref{7138}.
        test    dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_save_32_or_64
        fxsave  [rdx + CPUMCPU.Guest.fpu]
        jmp     short .fpu_save_done
.fpu_save_32_or_64:
        SAVE_32_OR_64_FPU
.fpu_save_done:

        ; Use explicit REX prefix. See @bugref{6398}.
        o64 fxrstor [rdx + CPUMCPU.Host.fpu]
%else
        fxsave  [edx + CPUMCPU.Guest.fpu] ; ASSUMES all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption).
        fxrstor [edx + CPUMCPU.Host.fpu]
%endif

.done:
        RESTORE_CR0                     ; Restore CR0 from xCX if it was changed above.
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf
.fpu_not_used:
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; Zero-extend the 32-bit CPUMCPU pointer.

        ; Save the guest FPU (32-bit or 64-bit), preserving existing broken state. See @bugref{7138}.
        test    dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short .fpu_save_32_or_64_darwin
        fxsave  [rdx + CPUMCPU.Guest.fpu]
        jmp     short .fpu_save_done_darwin
.fpu_save_32_or_64_darwin:
        SAVE_32_OR_64_FPU
.fpu_save_done_darwin:

        o64 fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState
393
394
;;
; Sets the host's FPU/XMM state.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
        ; Fetch the CPUMCPU pointer into xDX from the ABI-specific argument location.
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif

        ; Only restore if the guest actually used the FPU.  Using fxrstor
        ; below should then not cause unwanted exceptions on the host.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      short .fpu_not_used

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ; Clear CR0 FPU bits so we don't trap; backs up CR0 in xCX.
        SAVE_CR0_CLEAR_FPU_TRAPS
        ; Do NOT use xCX from this point!

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef RT_ARCH_AMD64
        o64 fxrstor [xDX + CPUMCPU.Host.fpu]
%else
        fxrstor [xDX + CPUMCPU.Host.fpu]
%endif

.done:
        RESTORE_CR0                     ; Restore CR0 from xCX if it was changed above.
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf
.fpu_not_used:
        xor     eax, eax
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     edx, 0ffffffffh         ; Zero-extend the 32-bit CPUMCPU pointer.
        o64 fxrstor [rdx + CPUMCPU.Host.fpu]
        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState
460
461
462%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
        ; Fetch the output array pointer into xCX from the ABI-specific argument location.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi
 %endif
%else
        mov     xCX, dword [esp + 4]
%endif
        pushf                           ; Just to be on the safe side.
        cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        ;
        ; Read DR0..DR3 into the four 64-bit output slots.
        ;
        mov     xAX, dr0
        mov     xDX, dr1
        mov     [xCX], xAX
        mov     [xCX + 8 * 1], xDX
        mov     xAX, dr2
        mov     xDX, dr3
        mov     [xCX + 8 * 2], xAX
        mov     [xCX + 8 * 3], xDX

.done:
        popf
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh         ; Zero-extend the 32-bit array pointer.

        mov     rax, dr0
        mov     rdx, dr1
        mov     r8, dr2
        mov     r9, dr3
        mov     [rcx], rax
        mov     [rcx + 8 * 1], rdx
        mov     [rcx + 8 * 2], r8
        mov     [rcx + 8 * 3], r9
        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx
521
522
;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
        ; Fetch the input array pointer into xCX from the ABI-specific argument location.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
        mov     xCX, rdi
 %endif
%else
        mov     xCX, dword [esp + 4]
%endif
        pushf                           ; Just to be on the safe side.
        cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
        jz      .legacy_mode
        db      0xea                    ; jmp far .sixtyfourbit_mode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

        ;
        ; Write the four 64-bit input slots into DR0..DR3.
        ;
        mov     xAX, [xCX]
        mov     xDX, [xCX + 8 * 1]
        mov     dr0, xAX
        mov     dr1, xDX
        mov     xAX, [xCX + 8 * 2]
        mov     xDX, [xCX + 8 * 3]
        mov     dr2, xAX
        mov     dr3, xDX

.done:
        popf
        ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
        and     ecx, 0ffffffffh         ; Zero-extend the 32-bit array pointer.

        mov     rax, [rcx]
        mov     rdx, [rcx + 8 * 1]
        mov     r8, [rcx + 8 * 2]
        mov     r9, [rcx + 8 * 3]
        mov     dr0, rax
        mov     dr1, rdx
        mov     dr2, r8
        mov     dr3, r9
        jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
        dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx
581
582%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
583
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette