source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm @ 15749

Last change on this file since 15749 was 15417, checked in by vboxsync, 16 years ago:
CPUM: save and restore 64-bit debug registers on darwin (#3202).

File size: 16.9 KB
; $Id: CPUMR0A.asm 15417 2008-12-13 06:11:26Z vboxsync $
;; @file
; CPUM - Guest Context Assembly Routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vm.mac"
%include "VBox/err.mac"
%include "VBox/stam.mac"
%include "CPUMInternal.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif


;*******************************************************************************
;* External Symbols                                                            *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif


;*******************************************************************************
;* Global Variables                                                            *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE,
; but that's not relevant atm.)
GLOBALNAME g_fCPUMIs64bitHost
    dd      NAME(SUPR0AbsIs64bit)
%endif
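
; A consumer of this flag (see the FPU routines below) then needs only a
; memory compare and a branch, with no scratch register:
;
;       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
;       jz      .legacy_mode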


BEGINCODE


;;
; Saves the guest's FPU/XMM state and restores the host's.
;
; @returns 0
; @param   pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx   CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Save the guest FPU state and restore the host state if the guest has used the FPU.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz short .fpu_not_used

    mov     xAX, cr0
    mov     xCX, xAX                ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX
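    ; (fxsave/fxrstor, like other x87/SSE instructions, raise #NM when
    ; CR0.TS or CR0.EM is set, hence both bits are masked off above and
    ; the original CR0 value is put back at .done.)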

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
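    ; The db/dd pair above hand-assembles a direct far jump (opcode 0EAh
    ; followed by a 16:32 target), presumably because the target selector,
    ; SUPR0Abs64bitKernelCS, is an absolute symbol only resolved at load
    ; time.  The jump switches to the 64-bit kernel code segment so the
    ; 64-bit variants of the instructions below can be used.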

    fxsave  [xDX + CPUMCPU.Guest.fpu]
    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                ; and restore old CR0 again
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCPU.Guest.fpu]
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState
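; The .fpret/jmp-far-indirect pair above is the matching return trick: an
; indirect far jump through a 16:32 pointer in memory, taking the CPU from
; the 64-bit code segment back to .done in the 32-bit kernel code segment
; (SUPR0AbsKernelCS).  The same pattern recurs in the routines below.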

;;
; Restores the host's FPU/XMM state.
;
; @returns 0
; @param   pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx   CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Restore the host FPU state if the guest has used the FPU.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz short .fpu_not_used

    mov     xAX, cr0
    mov     xCX, xAX                ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                ; and restore old CR0 again
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState


;;
; Restores the guest's FPU/XMM state.
;
; @param   pCtx        x86:[esp+4] GCC:rdi MSC:rcx   CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxrstor [xDX + CPUMCTX.fpu]
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxrstor [rdx + CPUMCTX.fpu]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMLoadFPU
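; Note: fxsave/fxrstor transfer the full 512-byte FXSAVE image (x87
; registers, control/status words, MXCSR and the XMM registers) and
; require a 16-byte aligned memory operand, which the CPUMCTX.fpu /
; X86FXSTATE layout is expected to provide.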


;;
; Saves the guest's FPU/XMM state.
;
; @param   pCtx        x86:[esp+4] GCC:rdi MSC:rcx   CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    fxsave  [xDX + CPUMCTX.fpu]
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCTX.fpu]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMSaveFPU


;;
; Restores the guest's XMM state.
;
; @param   pCtx        x86:[esp+4] GCC:rdi MSC:rcx   CPUMCTX pointer
;
align 16
BEGINPROC CPUMLoadXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    movdqa  xmm2, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    movdqa  xmm3, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    movdqa  xmm4, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    movdqa  xmm5, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    movdqa  xmm6, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    movdqa  xmm7, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

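    ; XMM8-XMM15 exist only in 64-bit mode, so they are touched only when
    ; the guest actually runs in long mode; that is what the EFER.LMA test
    ; below checks.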
%ifdef RT_ARCH_AMD64
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .done

    movdqa  xmm8, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    movdqa  xmm9, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    movdqa  xmm10, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    movdqa  xmm11, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    movdqa  xmm12, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    movdqa  xmm13, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
%endif
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh

    movdqa  xmm0, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    movdqa  xmm1, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    movdqa  xmm2, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    movdqa  xmm3, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    movdqa  xmm4, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    movdqa  xmm5, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    movdqa  xmm6, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    movdqa  xmm7, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .sixtyfourbit_done

    movdqa  xmm8, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    movdqa  xmm9, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
.sixtyfourbit_done:
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMLoadXMM


;;
; Saves the guest's XMM state.
;
; @param   pCtx        x86:[esp+4] GCC:rdi MSC:rcx   CPUMCTX pointer
;
align 16
BEGINPROC CPUMSaveXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

%ifdef RT_ARCH_AMD64
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .done

    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
%endif
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh

    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .sixtyfourbit_done

    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

.sixtyfourbit_done:
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC CPUMSaveXMM


;;
; Sets the FPU control word, clearing pending exceptions first.
;
; @param   u16FCW      x86:[esp+4] GCC:rdi MSC:rcx   The new FPU control word.
align 16
BEGINPROC cpumR0SetFCW
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
    fnclex
    push    xAX
    fldcw   [xSP]
    pop     xAX
    ret
ENDPROC cpumR0SetFCW
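; fldcw has no register or immediate form, so the new control word is
; pushed and loaded from the stack.  fnclex clears any pending x87
; exceptions first; otherwise loading a control word that unmasks a
; pending exception could trigger #MF at the next FPU instruction.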


;;
; Gets the FPU control word.
;
align 16
BEGINPROC cpumR0GetFCW
    fnstcw  [xSP - 8]
    mov     ax, word [xSP - 8]
    ret
ENDPROC cpumR0GetFCW


;;
; Sets the MXCSR.
;
; @param   u32MXCSR    x86:[esp+4] GCC:rdi MSC:rcx   The new MXCSR value.
align 16
BEGINPROC cpumR0SetMXCSR
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
    push    xAX
    ldmxcsr [xSP]
    pop     xAX
    ret
ENDPROC cpumR0SetMXCSR


;;
; Gets the MXCSR.
;
align 16
BEGINPROC cpumR0GetMXCSR
    stmxcsr [xSP - 8]
    mov     eax, dword [xSP - 8]
    ret
ENDPROC cpumR0GetMXCSR
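; Note: both getters store into the scratch area just below the stack
; pointer ([xSP - 8]) and read the value straight back, saving an explicit
; xSP adjustment.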


%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

    ;
    ; Do the job.
    ;
    mov     xAX, dr0
    mov     xDX, dr1
    mov     [xCX], xAX
    mov     [xCX + 8 * 1], xDX
    mov     xAX, dr2
    mov     xDX, dr3
    mov     [xCX + 8 * 2], xAX
    mov     [xCX + 8 * 3], xDX

.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, dr0
    mov     rdx, dr1
    mov     r8, dr2
    mov     r9, dr3
    mov     [rcx], rax
    mov     [rcx + 8 * 1], rdx
    mov     [rcx + 8 * 2], r8
    mov     [rcx + 8 * 3], r9
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx
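; On the hybrid 32-bit darwin kernel the debug registers are 64 bits wide,
; so the 64-bit mode path above is what allows the full DR0-DR3 values to
; be saved and restored (cf. the "save and restore 64-bit debug registers
; on darwin (#3202)" change note for this file).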


;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif

    ;
    ; Do the job.
    ;
    mov     xAX, [xCX]
    mov     xDX, [xCX + 8 * 1]
    mov     dr0, xAX
    mov     dr1, xDX
    mov     xAX, [xCX + 8 * 2]
    mov     xDX, [xCX + 8 * 3]
    mov     dr2, xAX
    mov     dr3, xDX

.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, [rcx]
    mov     rdx, [rcx + 8 * 1]
    mov     r8, [rcx + 8 * 2]
    mov     r9, [rcx + 8 * 3]
    mov     dr0, rax
    mov     dr1, rdx
    mov     dr2, r8
    mov     dr3, r9
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0