VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac@48366

Last change on this file since 48366 was 47844, checked in by vboxsync, 11 years ago

VMM: X2APIC + NMI. Only tested on AMD64.

; $Id: AMD64andLegacy.mac 47844 2013-08-19 14:03:17Z vboxsync $
;; @file
; VMM - World Switchers, template for AMD64 to PAE and 32-bit.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1
;%define STRICT_IF 1

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/apic.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/stam.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher.mac"


;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
BEGINDATA
GLOBALNAME Fixups
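; Reader's note: each FIXUP invocation below emits a fixup record into this
; .data blob and leaves an obviously bogus placeholder (0ffffffffh, 0ffffh,
; and the like) in the code stream.  The Relocate function declared near the
; end of this file is what patches those placeholders with real addresses
; and selectors when the switcher is installed, so none of the placeholder
; constants are ever executed as-is.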


BEGINCODE
GLOBALNAME Start

%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
BITS 64

;;
; The C interface.
;
; @param    pVM     GCC: rdi  MSC: rcx  The VM handle.
;
BEGINPROC vmmR0ToRawMode
%ifdef DEBUG_STUFF
    COM64_S_NEWLINE
    COM64_S_CHAR '^'
%endif
    ;
    ; The ordinary version of the code.
    ;

 %ifdef STRICT_IF
    pushf
    pop     rax
    test    eax, X86_EFL_IF
    jz      .if_clear_in
    mov     eax, 0c0ffee00h
    ret
.if_clear_in:
 %endif

    ;
    ; make r9 = pVM and rdx = pCpum.
    ; rax, rcx and r8 are scratch here after.
 %ifdef RT_OS_WINDOWS
    mov     r9, rcx
 %else
    mov     r9, rdi
 %endif
    lea     rdx, [r9 + VM.cpum]

 %ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    lea     r8, [r9 + VM.StatSwitcherToGC]
    STAM64_PROFILE_ADV_START r8
 %endif

    ;
    ; Call worker (far return).
    ;
    mov     eax, cs
    push    rax
    call    NAME(vmmR0ToRawModeAsm)
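    ; Note: the CS pushed above, together with the near-call return address,
    ; forms a far-return frame.  The worker eventually comes back via a
    ; 64-bit retf (the 'db 048h / retf' at the end of vmmRCToHostAsm), which
    ; pops both RIP and that saved CS, so the host CS is reloaded even
    ; though several mode switches happen in between.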

 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    ; Unblock Local APIC NMI vectors
    ; Do this here to ensure the host CS is already restored
    mov     r8d, [rdx + CPUM.offCPUMCPU0]
    mov     ecx, [rdx + r8 + CPUMCPU.fApicDisVectors]
    test    ecx, ecx
    jz      gth64_apic_done
    cmp     byte [rdx + r8 + CPUMCPU.fX2Apic], 1
    je      gth64_x2apic

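    ; fApicDisVectors layout (set on the way in, see the htg_* code below):
    ;   bit 0 = LINT0 was masked, bit 1 = LINT1, bit 2 = performance
    ;   counter, bit 3 = thermal monitor.  Each 'shr ecx, 1' moves the next
    ;   flag into CF and 'jnc' skips the unmask when we didn't mask it.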
    ; Legacy APIC mode:
    mov     r8, [rdx + r8 + CPUMCPU.pvApicBase]
    shr     ecx, 1
    jnc     gth64_nolint0
    and     dword [r8 + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
gth64_nolint0:
    shr     ecx, 1
    jnc     gth64_nolint1
    and     dword [r8 + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
gth64_nolint1:
    shr     ecx, 1
    jnc     gth64_nopc
    and     dword [r8 + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
gth64_nopc:
    shr     ecx, 1
    jnc     gth64_notherm
    and     dword [r8 + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
gth64_notherm:
    jmp     gth64_apic_done

    ; X2 APIC mode:
gth64_x2apic:
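    ; In x2APIC mode the LVT registers live in MSR space instead of MMIO:
    ; MSR index = x2APIC MSR base (0800h) + (MMIO offset >> 4).  E.g. LINT0
    ; at MMIO offset 350h maps to MSR 0835h, which is what the
    ; MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4) expressions below
    ; compute.  rdmsr/wrmsr clobber eax/edx, hence the rax save/restore.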
    mov     r8, rax                     ; save rax
    mov     r10, rcx
    shr     r10d, 1
    jnc     gth64_x2_nolint0
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
    rdmsr
    and     eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth64_x2_nolint0:
    shr     r10d, 1
    jnc     gth64_x2_nolint1
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
    rdmsr
    and     eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth64_x2_nolint1:
    shr     r10d, 1
    jnc     gth64_x2_nopc
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
    rdmsr
    and     eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth64_x2_nopc:
    shr     r10d, 1
    jnc     gth64_x2_notherm
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
    rdmsr
    and     eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth64_x2_notherm:
    mov     rax, r8                     ; restore rax

gth64_apic_done:
 %endif

 %ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    lea     r8, [r9 + VM.StatSwitcherToGC]
    STAM64_PROFILE_ADV_STOP r8
 %endif

    ret
ENDPROC vmmR0ToRawMode


%else ; VBOX_WITH_HYBRID_32BIT_KERNEL


BITS 32

;;
; The C interface.
;
BEGINPROC vmmR0ToRawMode
 %ifdef DEBUG_STUFF
    COM32_S_NEWLINE
    COM32_S_CHAR '^'
 %endif

 %ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
 %endif

    ; Thunk to/from 64 bit when invoking the worker routine.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.cpum
    mov     edx, 0ffffffffh

    push    0
    push    cs
    push    0
    FIXUP FIX_HC_32BIT, 1, .vmmR0ToRawModeReturn - NAME(Start)
    push    0ffffffffh

    FIXUP FIX_HC_64BIT_CS, 1
    push    0ffffh
    FIXUP FIX_HC_32BIT, 1, NAME(vmmR0ToRawModeAsm) - NAME(Start)
    push    0ffffffffh
    retf
.vmmR0ToRawModeReturn:

    ;
    ; This selector reloading is probably not necessary, but we do it anyway to be quite sure
    ; the CPU has the right idea about the selectors.
    ;
    mov     edx, ds
    mov     ds, edx
    mov     ecx, es
    mov     es, ecx
    mov     edx, ss
    mov     ss, edx

 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    Missing implementation!
 %endif


 %ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
 %endif

    ret
ENDPROC vmmR0ToRawMode

BITS 64
%endif ;!VBOX_WITH_HYBRID_32BIT_KERNEL



; *****************************************************************************
; vmmR0ToRawModeAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;    - edx        virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;    - eax, ecx, edx, r8
;
; ASSUMPTION:
;    - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0ToRawModeAsm
    ;; Store the offset from CPUM to CPUMCPU in r8
    mov     r8d, [rdx + CPUM.offCPUMCPU0]

    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    ; general registers.
    ; mov [rdx + r8 + CPUMCPU.Host.rax], rax - scratch
    mov     [rdx + r8 + CPUMCPU.Host.rbx], rbx
    ; mov [rdx + r8 + CPUMCPU.Host.rcx], rcx - scratch
    ; mov [rdx + r8 + CPUMCPU.Host.rdx], rdx - scratch
    mov     [rdx + r8 + CPUMCPU.Host.rdi], rdi
    mov     [rdx + r8 + CPUMCPU.Host.rsi], rsi
    mov     [rdx + r8 + CPUMCPU.Host.rsp], rsp
    mov     [rdx + r8 + CPUMCPU.Host.rbp], rbp
    ; mov [rdx + r8 + CPUMCPU.Host.r8 ], r8 - scratch
    ; mov [rdx + r8 + CPUMCPU.Host.r9 ], r9 - scratch
    mov     [rdx + r8 + CPUMCPU.Host.r10], r10
    mov     [rdx + r8 + CPUMCPU.Host.r11], r11
    mov     [rdx + r8 + CPUMCPU.Host.r12], r12
    mov     [rdx + r8 + CPUMCPU.Host.r13], r13
    mov     [rdx + r8 + CPUMCPU.Host.r14], r14
    mov     [rdx + r8 + CPUMCPU.Host.r15], r15
    ; selectors.
    mov     [rdx + r8 + CPUMCPU.Host.ds], ds
    mov     [rdx + r8 + CPUMCPU.Host.es], es
    mov     [rdx + r8 + CPUMCPU.Host.fs], fs
    mov     [rdx + r8 + CPUMCPU.Host.gs], gs
    mov     [rdx + r8 + CPUMCPU.Host.ss], ss
    ; MSRs
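    ; (rdmsr returns its result in edx:eax, so the CPUMCPU pointer in rdx
    ; is parked in rbx across the three reads below and restored after.)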
    mov     rbx, rdx
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    mov     [rbx + r8 + CPUMCPU.Host.FSbase], eax
    mov     [rbx + r8 + CPUMCPU.Host.FSbase + 4], edx
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    mov     [rbx + r8 + CPUMCPU.Host.GSbase], eax
    mov     [rbx + r8 + CPUMCPU.Host.GSbase + 4], edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    mov     [rbx + r8 + CPUMCPU.Host.efer], eax
    mov     [rbx + r8 + CPUMCPU.Host.efer + 4], edx
    mov     rdx, rbx
    ; special registers.
    sldt    [rdx + r8 + CPUMCPU.Host.ldtr]
    sidt    [rdx + r8 + CPUMCPU.Host.idtr]
    sgdt    [rdx + r8 + CPUMCPU.Host.gdtr]
    str     [rdx + r8 + CPUMCPU.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
    ; flags
    pushf
    pop     qword [rdx + r8 + CPUMCPU.Host.rflags]

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    ; Block Local APIC NMI vectors
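    ; Why: cli/IF only blocks maskable interrupts, never NMIs.  Any local
    ; APIC LVT entry programmed for NMI delivery (a profiler on the perf
    ; counter LVT, for instance) could therefore fire while we run with
    ; partially switched CPU state, so such entries are masked here and
    ; the set of masked ones is recorded in fApicDisVectors for the
    ; unblock path on the way back.  The read-back after each MMIO write
    ; forces the write to complete before we move on.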
    cmp     byte [rdx + r8 + CPUMCPU.fX2Apic], 1
    je      htg_x2apic
    mov     rbx, [rdx + r8 + CPUMCPU.pvApicBase]
    or      rbx, rbx
    jz      htg_apic_done
    xor     edi, edi                    ; fApicDisVectors
    mov     eax, [rbx + APIC_REG_LVT_LINT0]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nolint0
    or      edi, 0x01
    or      eax, APIC_REG_LVT_MASKED
    mov     [rbx + APIC_REG_LVT_LINT0], eax
    mov     eax, [rbx + APIC_REG_LVT_LINT0] ; write completion
htg_nolint0:
    mov     eax, [rbx + APIC_REG_LVT_LINT1]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nolint1
    or      edi, 0x02
    or      eax, APIC_REG_LVT_MASKED
    mov     [rbx + APIC_REG_LVT_LINT1], eax
    mov     eax, [rbx + APIC_REG_LVT_LINT1] ; write completion
htg_nolint1:
    mov     eax, [rbx + APIC_REG_LVT_PC]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nopc
    or      edi, 0x04
    or      eax, APIC_REG_LVT_MASKED
    mov     [rbx + APIC_REG_LVT_PC], eax
    mov     eax, [rbx + APIC_REG_LVT_PC] ; write completion
htg_nopc:
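    ; The thermal LVT only exists when the APIC version register says so:
    ; bits 23:16 hold the highest LVT entry index, and the thermal monitor
    ; LVT is entry 5, hence the 'cmp al, 5' guard below.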
    mov     eax, [rbx + APIC_REG_VERSION]
    shr     eax, 16
    cmp     al, 5
    jb      htg_notherm
    mov     eax, [rbx + APIC_REG_LVT_THMR]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_notherm
    or      edi, 0x08
    or      eax, APIC_REG_LVT_MASKED
    mov     [rbx + APIC_REG_LVT_THMR], eax
    mov     eax, [rbx + APIC_REG_LVT_THMR] ; write completion
htg_notherm:
    mov     [rdx + r8 + CPUMCPU.fApicDisVectors], edi
    jmp     htg_apic_done

    ; X2APIC?
htg_x2apic:
    mov     r15, rdx                    ; save rdx
    xor     edi, edi                    ; fApicDisVectors

    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
    rdmsr
    mov     ebx, eax
    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ebx, APIC_REG_LVT_MODE_NMI
    jne     htg_x2_nolint0
    or      edi, 0x01
    or      eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nolint0:
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
    rdmsr
    mov     ebx, eax
    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ebx, APIC_REG_LVT_MODE_NMI
    jne     htg_x2_nolint1
    or      edi, 0x02
    or      eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nolint1:
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
    rdmsr
    mov     ebx, eax
    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ebx, APIC_REG_LVT_MODE_NMI
    jne     htg_x2_nopc
    or      edi, 0x04
    or      eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nopc:
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
    rdmsr
    shr     eax, 16
    cmp     al, 5
    jb      htg_x2_notherm
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
    rdmsr
    mov     ebx, eax
    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ebx, APIC_REG_LVT_MODE_NMI
    jne     htg_x2_notherm
    or      edi, 0x08
    or      eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_notherm:
    mov     rdx, r15
    mov     [rdx + r8 + CPUMCPU.fApicDisVectors], edi
htg_apic_done:

%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov     rbx, rdx                    ; save edx
    mov     ecx, MSR_IA32_SYSENTER_CS
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov     [rbx + r8 + CPUMCPU.Host.SysEnter.cs], eax
    mov     [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4], edx
    xor     eax, eax                    ; load 0:0 to cause #GP upon sysenter
    xor     edx, edx
    wrmsr
    mov     rdx, rbx                    ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    ;; handle use flags.
    mov     esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
    and     esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov     [rdx + r8 + CPUMCPU.fUseFlags], esi

    ; debug registers.
    test    esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
    jnz     htg_debug_regs_save
htg_debug_regs_no:
    DEBUG_CHAR('a')                     ; trashes esi

    ; control registers.
    mov     rax, cr0
    mov     [rdx + r8 + CPUMCPU.Host.cr0], rax
    ;mov    rax, cr2                    ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov    [rdx + r8 + CPUMCPU.Host.cr2], rax
    mov     rax, cr3
    mov     [rdx + r8 + CPUMCPU.Host.cr3], rax
    mov     rax, cr4
    mov     [rdx + r8 + CPUMCPU.Host.cr4], rax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ;
    and     rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
    mov     ecx, [rdx + r8 + CPUMCPU.Guest.cr4]
    DEBUG_CHAR('b')                     ; trashes esi
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;  in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
    ;  simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
    and     ecx, [rdx + CPUM.CR4.AndMask]
    or      eax, ecx
    or      eax, [rdx + CPUM.CR4.OrMask]
    mov     cr4, rax
    DEBUG_CHAR('c')                     ; trashes esi

    mov     eax, [rdx + r8 + CPUMCPU.Guest.cr0]
    and     eax, X86_CR0_EM
    or      eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov     cr0, rax
    DEBUG_CHAR('0')                     ; trashes esi
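    ; (Keeping only the guest's EM bit while forcing TS|MP means the first
    ; FPU/MMX/SSE instruction executed will raise #NM, which is what lets
    ; the FPU state be loaded and saved lazily instead of on every switch.)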


    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt    [rdx + r8 + CPUMCPU.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ; Store the hypervisor cr3 for later loading
    mov     ebp, [rdx + r8 + CPUMCPU.Hyper.cr3]

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     eax, 0ffffffffh
    mov     cr3, rax
    DEBUG_CHAR('2')                     ; trashes esi

    ;;
    ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 Pointer to IDEnterTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
dd 0
    FIXUP FIX_HYPER_CS, 0
dd 0


;;
; Detour for saving the host DR7 and DR6.
; esi and rdx must be preserved.
htg_debug_regs_save:
DEBUG_S_CHAR('s');
    mov     rax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov     [rdx + r8 + CPUMCPU.Host.dr7], rax
    mov     ecx, X86_DR7_INIT_VAL
    cmp     eax, ecx
    je      .htg_debug_regs_dr7_disabled
    mov     dr7, rcx
.htg_debug_regs_dr7_disabled:
    mov     rax, dr6                    ; just in case we save the state register too.
    mov     [rdx + r8 + CPUMCPU.Host.dr6], rax
    ; save host DR0-3?
    test    esi, CPUM_USE_DEBUG_REGS_HYPER
    jz      htg_debug_regs_no
DEBUG_S_CHAR('S');
    mov     rax, dr0
    mov     [rdx + r8 + CPUMCPU.Host.dr0], rax
    mov     rbx, dr1
    mov     [rdx + r8 + CPUMCPU.Host.dr1], rbx
    mov     rcx, dr2
    mov     [rdx + r8 + CPUMCPU.Host.dr2], rcx
    mov     rax, dr3
    mov     [rdx + r8 + CPUMCPU.Host.dr3], rax
    or      dword [rdx + r8 + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST
    jmp     htg_debug_regs_no


    ; We're now on identity mapped pages in 32-bit compatibility mode.
BITS 32
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
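    ; To get out of long mode we must: run identity mapped (done), turn
    ; paging off, and only then clear EFER.LME - the LME bit cannot be
    ; toggled while CR0.PG is set - then switch page tables and turn
    ; paging back on in plain 32-bit (or PAE) mode.  Steps 2-5 below do
    ; exactly that.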
    DEBUG_CHAR('3')

    ; 2. Deactivate long mode by turning off paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('4')

    ; 3. Load intermediate page table.
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx

    ; 4. Disable long mode.
    ;    We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
    mov     ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and     eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
    wrmsr
    DEBUG_CHAR('6')

%ifndef SWITCHER_TO_PAE
    ; 4b. Disable PAE.
    mov     eax, cr4
    and     eax, ~X86_CR4_PAE
    mov     cr4, eax
%else
%endif

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    jmp short just_a_jump
just_a_jump:
    DEBUG_CHAR('7')

    ;;
    ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
    jmp near NAME(JmpGCTarget)


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME JmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    mov     cr3, ebp                    ; ebp set above
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    ; Load selectors
    DEBUG_CHAR('1')
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax
    xor     eax, eax
    mov     gs, eax
    mov     fs, eax
    ; Load pCpum into EDX
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('2')
    lidt    [edx + CPUMCPU.Hyper.idtr]

    ; Setup the stack.
    DEBUG_CHAR('3')
    mov     ax, [edx + CPUMCPU.Hyper.ss.Sel]
    mov     ss, ax
    mov     esp, [edx + CPUMCPU.Hyper.esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_S_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and     dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
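    ; (ltr raises #GP if the referenced TSS descriptor is already marked
    ; busy; the busy bit is bit 9 of the descriptor's second dword, hence
    ; the ~0200h mask.  The 0ffffffffh address is a fixup placeholder for
    ; that dword in the GDT.)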
    DEBUG_S_CHAR('5')
    ltr     word [edx + CPUMCPU.Hyper.tr.Sel]
    DEBUG_S_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt    [edx + CPUMCPU.Hyper.ldtr.Sel]
    DEBUG_S_CHAR('7')

    ;; Use flags.
    mov     esi, [edx + CPUMCPU.fUseFlags]

    ; debug registers
    test    esi, CPUM_USE_DEBUG_REGS_HYPER
    jnz     htg_debug_regs_guest
htg_debug_regs_guest_done:
    DEBUG_S_CHAR('9')

    ; General registers (sans edx).
    mov     eax, [edx + CPUMCPU.Hyper.eax]
    mov     ebx, [edx + CPUMCPU.Hyper.ebx]
    mov     ecx, [edx + CPUMCPU.Hyper.ecx]
    mov     ebp, [edx + CPUMCPU.Hyper.ebp]
    mov     esi, [edx + CPUMCPU.Hyper.esi]
    mov     edi, [edx + CPUMCPU.Hyper.edi]
    DEBUG_S_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
    push    dword [edx + CPUMCPU.Hyper.eflags]
    push    cs
    push    dword [edx + CPUMCPU.Hyper.eip]
    mov     edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!

%ifdef DEBUG_STUFF
    COM32_S_PRINT ';eip='
    push    eax
    mov     eax, [esp + 8]
    COM32_S_DWORD_REG eax
    pop     eax
    COM32_S_CHAR ';'
%endif
%ifdef VBOX_WITH_STATISTICS
    push    eax
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     eax, 0ffffffffh
    STAM32_PROFILE_ADV_STOP eax
    pop     eax
%endif

    iret                                ; Use iret to make debugging and TF/RF work.
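    ; (The three pushes above built an iret frame: EFLAGS, CS, EIP.  iret
    ; pops EIP/CS and loads EFLAGS in one instruction, so TF/RF only take
    ; effect on the first hypervisor instruction, not inside the switcher.)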

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; load hyper DR0-7
    mov     ebx, [edx + CPUMCPU.Hyper.dr]
    mov     dr0, ebx
    mov     ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
    mov     dr1, ecx
    mov     eax, [edx + CPUMCPU.Hyper.dr + 8*2]
    mov     dr2, eax
    mov     ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
    mov     dr3, ebx
    mov     ecx, X86_DR6_INIT_VAL
    mov     dr6, ecx
    mov     eax, [edx + CPUMCPU.Hyper.dr + 8*7]
    mov     dr7, eax
    or      dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jmp     htg_debug_regs_guest_done

ENDPROC vmmR0ToRawModeAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
734;
735; Push any arguments to the routine.
736; Push the argument frame size (cArg * 4).
737; Push the call target (_cdecl convention).
738; Push the address of this routine.
739;
740;
741ALIGNCODE(16)
742BEGINPROC vmmRCCallTrampoline
743%ifdef DEBUG_STUFF
744 COM32_S_CHAR 'c'
745 COM32_S_CHAR 't'
746 COM32_S_CHAR '!'
747%endif
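    ; Expected stack layout on entry (built by the caller as described
    ; above; the trampoline address itself has already been consumed as
    ; the entry eip):
    ;   [esp]       call target
    ;   [esp+4]     argument frame size in bytes (cArg * 4)
    ;   [esp+8]..   the arguments themselves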

    ; call routine
    pop     eax                         ; call address
    pop     edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM32_S_PRINT ';eax='
    COM32_S_DWORD_REG eax
    COM32_S_CHAR ';'
%endif
    call    eax                         ; do call
    add     esp, edi                    ; cleanup stack

    ; return to the host context (eax = C returncode).
%ifdef DEBUG_STUFF
    COM32_S_CHAR '`'
%endif
.to_host_again:
    call    NAME(vmmRCToHostAsm)
    mov     eax, VERR_VMM_SWITCHER_IPE_1
    jmp     .to_host_again
ENDPROC vmmRCCallTrampoline



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHost
%ifdef DEBUG_STUFF
    push    esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     esi
%endif
    mov     eax, [esp + 4]
    jmp     NAME(vmmRCToHostAsm)
ENDPROC vmmRCToHost


;;
; vmmRCToHostAsmNoReturn
;
; This is an entry point used by TRPM when dealing with raw-mode traps,
; i.e. traps in the hypervisor code. This will not return and saves no
; state, because the caller has already saved the state.
;
; @param    eax     Return code.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsmNoReturn
    DEBUG_S_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh

    jmp     vmmRCToHostAsm_SaveNoGeneralRegs
ENDPROC vmmRCToHostAsmNoReturn


;;
; vmmRCToHostAsm
;
; This is an entry point used by TRPM to return to host context when an
; interrupt occurred or a guest trap needs handling in host context. It
; is also used by the C interface above.
;
; The hypervisor context is saved and it will return to the caller if
; host context so desires.
;
; @param    eax     Return code.
; @uses     eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsm
    DEBUG_S_CHAR('%')
    push    edx

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh

    ; Save register context.
    pop     dword [edx + CPUMCPU.Hyper.edx]
    pop     dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
    mov     dword [edx + CPUMCPU.Hyper.esp], esp
    mov     dword [edx + CPUMCPU.Hyper.eax], eax
    mov     dword [edx + CPUMCPU.Hyper.ebx], ebx
    mov     dword [edx + CPUMCPU.Hyper.ecx], ecx
    mov     dword [edx + CPUMCPU.Hyper.esi], esi
    mov     dword [edx + CPUMCPU.Hyper.edi], edi
    mov     dword [edx + CPUMCPU.Hyper.ebp], ebp

    ; special registers which may change.
vmmRCToHostAsm_SaveNoGeneralRegs:
%ifdef STRICT_IF
    pushf
    pop     ecx
    test    ecx, X86_EFL_IF
    jz      .if_clear_out
    mov     eax, 0c0ffee01h
    cli
.if_clear_out:
%endif
    mov     edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)

    ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
    sldt    [edx + CPUMCPU.Hyper.ldtr.Sel]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; FPU context is saved before restore of host saving (another) branch.

    ; Disable debug registers if active so they cannot trigger while switching.
    test    dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jz      .gth_disabled_dr7
    mov     eax, X86_DR7_INIT_VAL
    mov     dr7, eax
.gth_disabled_dr7:

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!

    ;;
    ;; 0. Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')

    ; 1. Disable paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Enable PAE.
%ifdef SWITCHER_TO_PAE
    ; - already enabled
%else
    mov     ecx, cr4
    or      ecx, X86_CR4_PAE
    mov     cr4, ecx
%endif

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     ecx, 0ffffffffh
    mov     cr3, ecx
    DEBUG_CHAR('3')

    ; 4. Enable long mode.
    mov     ebp, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    or      eax, MSR_K6_EFER_LME
    wrmsr
    mov     edx, ebp
    DEBUG_CHAR('4')

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
    jmp     0ffffh:0fffffffeh

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
    ; Move on to the HC mapping.
    ;
BITS 64
ALIGNCODE(16)
NAME(IDExit64Mode):
    DEBUG_CHAR('6')
    jmp     [NAME(pHCExitTarget) wrt rip]

; 64-bit jump target
NAME(pHCExitTarget):
FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumHC):
FIXUP FIX_HC_64BIT_CPUM, 0
dq 0ffffffffffffffffh

    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')

    ; Clear high dword of the CPUMCPU pointer
    and     rdx, 0ffffffffh

    ; load final cr3
    mov     rsi, [rdx + CPUMCPU.Host.cr3]
    mov     cr3, rsi
    DEBUG_CHAR('@')

    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    mov     rdx, [NAME(pCpumHC) wrt rip]
    ; Load the CPUMCPU offset.
    mov     r8d, [rdx + CPUM.offCPUMCPU0]

    ; activate host gdt and idt
    lgdt    [rdx + r8 + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [rdx + r8 + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
    and     al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add     rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    and     dword [rax + 4], ~0200h     ; clear busy flag (2nd type2 bit)
    ltr     word [rdx + r8 + CPUMCPU.Host.tr]
%else
    movzx   eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
    and     al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add     rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    mov     ecx, [rax + 4]              ; ecx <- 2nd descriptor dword
    mov     ebx, ecx                    ; save original value
    and     ecx, ~0200h                 ; clear busy flag (2nd type2 bit)
    mov     [rax + 4], ecx              ; not using xchg here is paranoia..
    ltr     word [rdx + r8 + CPUMCPU.Host.tr]
    xchg    [rax + 4], ebx              ; using xchg is paranoia too...
%endif
    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [rdx + r8 + CPUMCPU.Host.ldtr]
    ; Restore segment registers
    mov     eax, [rdx + r8 + CPUMCPU.Host.ds]
    mov     ds, eax
    mov     eax, [rdx + r8 + CPUMCPU.Host.es]
    mov     es, eax
    mov     eax, [rdx + r8 + CPUMCPU.Host.fs]
    mov     fs, eax
    mov     eax, [rdx + r8 + CPUMCPU.Host.gs]
    mov     gs, eax
    ; restore stack
    mov     eax, [rdx + r8 + CPUMCPU.Host.ss]
    mov     ss, eax
    mov     rsp, [rdx + r8 + CPUMCPU.Host.rsp]

    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov     rbx, rdx                    ; save edx
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     eax, [rbx + r8 + CPUMCPU.Host.SysEnter.cs]
    mov     edx, [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4]
    wrmsr                               ; MSR[ecx] <- edx:eax
    mov     rdx, rbx                    ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    ;; @todo AMD syscall

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    mov     esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
    test    esi, CPUM_USED_FPU
    jz short gth_fpu_no
    mov     rcx, cr0
    and     rcx, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rcx

    fxsave  [rdx + r8 + CPUMCPU.Guest.fpu]
    fxrstor [rdx + r8 + CPUMCPU.Host.fpu]
    jmp short gth_fpu_no

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov     rcx, [rdx + r8 + CPUMCPU.Host.cr4]
    test    rcx, X86_CR4_PCIDE
    jz      gth_no_pcide
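    ; CR4.PCIDE may only be set while CR3[11:0] (the current PCID) is
    ; zero, otherwise mov cr4 raises #GP.  So: load cr3 with the PCID
    ; bits cleared, set cr4, then reload the real cr3 with its PCID.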
    mov     rax, [rdx + r8 + CPUMCPU.Host.cr3]
    and     rax, ~0xfff                 ; clear the PCID in cr3
    mov     cr3, rax
    mov     cr4, rcx
    mov     rax, [rdx + r8 + CPUMCPU.Host.cr3]
    mov     cr3, rax                    ; reload it with the right PCID.
    jmp     gth_restored_cr4
gth_no_pcide:
    mov     cr4, rcx
gth_restored_cr4:
    mov     rcx, [rdx + r8 + CPUMCPU.Host.cr0]
    mov     cr0, rcx
    ;mov    rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumed to be a waste of time.
    ;mov    cr2, rcx

    ; Restore MSRs
    mov     rbx, rdx
    mov     ecx, MSR_K8_FS_BASE
    mov     eax, [rbx + r8 + CPUMCPU.Host.FSbase]
    mov     edx, [rbx + r8 + CPUMCPU.Host.FSbase + 4]
    wrmsr
    mov     ecx, MSR_K8_GS_BASE
    mov     eax, [rbx + r8 + CPUMCPU.Host.GSbase]
    mov     edx, [rbx + r8 + CPUMCPU.Host.GSbase + 4]
    wrmsr
    mov     ecx, MSR_K6_EFER
    mov     eax, [rbx + r8 + CPUMCPU.Host.efer]
    mov     edx, [rbx + r8 + CPUMCPU.Host.efer + 4]
    wrmsr
    mov     rdx, rbx

    ; Restore debug registers (if modified). (ESI must still be fUseFlags! Must be done late, at least after CR4!)
    test    esi, CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER
    jnz     gth_debug_regs_restore
gth_debug_regs_done:
    and     dword [rdx + r8 + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)

    ; Restore general registers.
    mov     eax, edi                    ; restore return code. eax = return code !!
    ; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
    mov     rbx, [rdx + r8 + CPUMCPU.Host.rbx]
    ; mov rcx, [rdx + r8 + CPUMCPU.Host.rcx] - scratch
    ; mov rdx, [rdx + r8 + CPUMCPU.Host.rdx] - scratch
    mov     rdi, [rdx + r8 + CPUMCPU.Host.rdi]
    mov     rsi, [rdx + r8 + CPUMCPU.Host.rsi]
    mov     rsp, [rdx + r8 + CPUMCPU.Host.rsp]
    mov     rbp, [rdx + r8 + CPUMCPU.Host.rbp]
    ; mov r8,  [rdx + r8 + CPUMCPU.Host.r8 ] - scratch
    ; mov r9,  [rdx + r8 + CPUMCPU.Host.r9 ] - scratch
    mov     r10, [rdx + r8 + CPUMCPU.Host.r10]
    mov     r11, [rdx + r8 + CPUMCPU.Host.r11]
    mov     r12, [rdx + r8 + CPUMCPU.Host.r12]
    mov     r13, [rdx + r8 + CPUMCPU.Host.r13]
    mov     r14, [rdx + r8 + CPUMCPU.Host.r14]
    mov     r15, [rdx + r8 + CPUMCPU.Host.r15]

    ; finally restore flags. (probably not required)
    push    qword [rdx + r8 + CPUMCPU.Host.rflags]
    popf


%ifdef DEBUG_STUFF
    COM64_S_CHAR '4'
%endif
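    ; The 048h byte below is a REX.W prefix: a plain retf would pop a
    ; 32-bit offset, but the far-return frame built in vmmR0ToRawMode
    ; holds a 64-bit RIP and CS, so a 64-bit far return is hand-assembled.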
    db 048h
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    mov     rax, dr7                    ; Some DR7 paranoia first...
    mov     ecx, X86_DR7_INIT_VAL
    cmp     rax, rcx
    je      .gth_debug_skip_dr7_disabling
    mov     dr7, rcx
.gth_debug_skip_dr7_disabling:
    test    esi, CPUM_USED_DEBUG_REGS_HOST
    jz      .gth_debug_regs_dr7

    DEBUG_S_CHAR('r')
    mov     rax, [rdx + r8 + CPUMCPU.Host.dr0]
    mov     dr0, rax
    mov     rbx, [rdx + r8 + CPUMCPU.Host.dr1]
    mov     dr1, rbx
    mov     rcx, [rdx + r8 + CPUMCPU.Host.dr2]
    mov     dr2, rcx
    mov     rax, [rdx + r8 + CPUMCPU.Host.dr3]
    mov     dr3, rax
.gth_debug_regs_dr7:
    mov     rbx, [rdx + r8 + CPUMCPU.Host.dr6]
    mov     dr6, rbx
    mov     rcx, [rdx + r8 + CPUMCPU.Host.dr7]
    mov     dr7, rcx

    ; We clear the USED flags in the main code path.
    jmp     gth_debug_regs_done

ENDPROC vmmRCToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                   RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                 RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                  RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,              RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                  dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                   dd NAME(End)                    - NAME(Start)
        at VMMSWITCHERDEF.offR0ToRawMode,           dd NAME(vmmR0ToRawMode)         - NAME(Start)
        at VMMSWITCHERDEF.offRCToHost,              dd NAME(vmmRCToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offRCCallTrampoline,      dd NAME(vmmRCCallTrampoline)    - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsm,           dd NAME(vmmRCToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsmNoReturn,   dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,               dd 0
        at VMMSWITCHERDEF.cbHCCode0,                dd NAME(IDEnterTarget)          - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,               dd NAME(HCExitTarget)           - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                dd NAME(End)                    - NAME(HCExitTarget)
        at VMMSWITCHERDEF.offIDCode0,               dd NAME(IDEnterTarget)          - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                dd NAME(JmpGCTarget)            - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,               dd NAME(IDExitTarget)           - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                dd NAME(HCExitTarget)           - NAME(IDExitTarget)
        at VMMSWITCHERDEF.offGCCode,                dd NAME(JmpGCTarget)            - NAME(Start)
        at VMMSWITCHERDEF.cbGCCode,                 dd NAME(IDExitTarget)           - NAME(JmpGCTarget)
    iend