VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac@34079

Last change on this file since 34079 was 33935, checked in by vboxsync on 2010-11-10:

VMM: mask all Local APIC interrupt vectors which are set up to NMI mode during world switch (raw mode only)

1; $Id: AMD64andLegacy.mac 33935 2010-11-10 15:37:02Z vboxsync $
2;; @file
3; VMM - World Switchers, template for AMD64 to PAE and 32-bit.
4;
5
6;
7; Copyright (C) 2006-2007 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;%define DEBUG_STUFF 1
19;%define STRICT_IF 1
20
21;*******************************************************************************
22;* Header Files *
23;*******************************************************************************
24%include "VBox/asmdefs.mac"
25%include "VBox/apic.mac"
26%include "VBox/x86.mac"
27%include "VBox/cpum.mac"
28%include "VBox/stam.mac"
29%include "VBox/vm.mac"
30%include "CPUMInternal.mac"
31%include "VMMSwitcher/VMMSwitcher.mac"
32
33
34;
35; Start the fixup records
36; We collect the fixups in the .data section as we go along
37; It is therefore VITAL that no-one is using the .data section
38; for anything else between 'Start' and 'End'.
39;
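; (Note, roughly: each FIXUP invocation below presumably appends a small record here --
;  the fixup type plus the offset of the code dword/word to patch and any extra operands --
;  which the Relocate callback referenced in the VMMSWITCHERDEF at the end of this file
;  applies whenever the switcher is (re)located.)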
40BEGINDATA
41GLOBALNAME Fixups
42
43
44
45BEGINCODE
46GLOBALNAME Start
47
48%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
49BITS 64
50
51;;
52; The C interface.
53;
54; @param pVM GCC: rdi MSC:rcx The VM handle.
55;
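; (The C prototype is presumably something like: DECLASM(int) vmmR0HostToGuest(PVM pVM);
;  the VMM status code travels back in eax via the common guest-to-host exit path below.)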
56BEGINPROC vmmR0HostToGuest
57%ifdef DEBUG_STUFF
58 COM64_S_NEWLINE
59 COM64_S_CHAR '^'
60%endif
61 ;
62 ; The ordinary version of the code.
63 ;
64
65 %ifdef STRICT_IF
66 pushf
67 pop rax
68 test eax, X86_EFL_IF
69 jz .if_clear_in
70 mov eax, 0c0ffee00h
71 ret
72.if_clear_in:
73 %endif
74
75 ;
76 ; make r9 = pVM and rdx = pCpum.
77 ; rax, rcx and r8 are scratch hereafter.
78 %ifdef RT_OS_WINDOWS
79 mov r9, rcx
80 %else
81 mov r9, rdi
82 %endif
83 lea rdx, [r9 + VM.cpum]
84
85 %ifdef VBOX_WITH_STATISTICS
86 ;
87 ; Switcher stats.
88 ;
89 lea r8, [r9 + VM.StatSwitcherToGC]
90 STAM64_PROFILE_ADV_START r8
91 %endif
92
93 ;
94 ; Call worker (far return).
95 ;
96 mov eax, cs
97 push rax
98 call NAME(vmmR0HostToGuestAsm)
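; (The cs pushed above plus the 8-byte return address pushed by this call form the
;  far-return frame that the REX.W-prefixed retf at the end of VMMGCGuestToHostAsm
;  eventually pops to get back here.)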
99
100 %ifdef VBOX_WITH_STATISTICS
101 ;
102 ; Switcher stats.
103 ;
104 lea r8, [r9 + VM.StatSwitcherToGC]
105 STAM64_PROFILE_ADV_STOP r8
106 %endif
107
108 ret
109ENDPROC vmmR0HostToGuest
110
111
112%else ; VBOX_WITH_HYBRID_32BIT_KERNEL
113
114
115BITS 32
116
117;;
118; The C interface.
119;
120BEGINPROC vmmR0HostToGuest
121%ifdef DEBUG_STUFF
122 COM32_S_NEWLINE
123 COM32_S_CHAR '^'
124%endif
125
126 %ifdef VBOX_WITH_STATISTICS
127 ;
128 ; Switcher stats.
129 ;
130 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
131 mov edx, 0ffffffffh
132 STAM_PROFILE_ADV_START edx
133 %endif
134
135 ; Thunk to/from 64 bit when invoking the worker routine.
136 ;
137 FIXUP FIX_HC_VM_OFF, 1, VM.cpum
138 mov edx, 0ffffffffh
139
140 push 0
141 push cs
142 push 0
143 FIXUP FIX_HC_32BIT, 1, .vmmR0HostToGuestReturn - NAME(Start)
144 push 0ffffffffh
145
146 FIXUP FIX_HC_64BIT_CS, 1
147 push 0ffffh
148 FIXUP FIX_HC_32BIT, 1, NAME(vmmR0HostToGuestAsm) - NAME(Start)
149 push 0ffffffffh
150 retf
151.vmmR0HostToGuestReturn:
152
153 ;
154 ; This selector reloading is probably not necessary, but we do it anyway to be quite sure
155 ; the CPU has the right idea about the selectors.
156 ;
157 mov edx, ds
158 mov ds, edx
159 mov ecx, es
160 mov es, ecx
161 mov edx, ss
162 mov ss, edx
163
164 %ifdef VBOX_WITH_STATISTICS
165 ;
166 ; Switcher stats.
167 ;
168 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
169 mov edx, 0ffffffffh
170 STAM_PROFILE_ADV_STOP edx
171 %endif
172
173 ret
174ENDPROC vmmR0HostToGuest
175
176BITS 64
177%endif ;!VBOX_WITH_HYBRID_32BIT_KERNEL
178
179
180
181; *****************************************************************************
182; vmmR0HostToGuestAsm
183;
184; Phase one of the switch from host to guest context (host MMU context)
185;
186; INPUT:
187; - edx virtual address of CPUM structure (valid in host context)
188;
189; USES/DESTROYS:
190; - eax, ecx, edx, r8
191;
192; ASSUMPTION:
193; - current CS and DS selectors are wide open
194;
195; *****************************************************************************
196ALIGNCODE(16)
197BEGINPROC vmmR0HostToGuestAsm
198 ;; Store the offset from CPUM to CPUMCPU in r8
199 mov r8d, [rdx + CPUM.offCPUMCPU0]
200
201 ;;
202 ;; Save CPU host context
203 ;; Skip eax, edx and ecx as these are not preserved over calls.
204 ;;
205 ; general registers.
206 ; mov [rdx + r8 + CPUMCPU.Host.rax], rax - scratch
207 mov [rdx + r8 + CPUMCPU.Host.rbx], rbx
208 ; mov [rdx + r8 + CPUMCPU.Host.rcx], rcx - scratch
209 ; mov [rdx + r8 + CPUMCPU.Host.rdx], rdx - scratch
210 mov [rdx + r8 + CPUMCPU.Host.rdi], rdi
211 mov [rdx + r8 + CPUMCPU.Host.rsi], rsi
212 mov [rdx + r8 + CPUMCPU.Host.rsp], rsp
213 mov [rdx + r8 + CPUMCPU.Host.rbp], rbp
214 ; mov [rdx + r8 + CPUMCPU.Host.r8 ], r8 - scratch
215 ; mov [rdx + r8 + CPUMCPU.Host.r9 ], r9 - scratch
216 mov [rdx + r8 + CPUMCPU.Host.r10], r10
217 mov [rdx + r8 + CPUMCPU.Host.r11], r11
218 mov [rdx + r8 + CPUMCPU.Host.r12], r12
219 mov [rdx + r8 + CPUMCPU.Host.r13], r13
220 mov [rdx + r8 + CPUMCPU.Host.r14], r14
221 mov [rdx + r8 + CPUMCPU.Host.r15], r15
222 ; selectors.
223 mov [rdx + r8 + CPUMCPU.Host.ds], ds
224 mov [rdx + r8 + CPUMCPU.Host.es], es
225 mov [rdx + r8 + CPUMCPU.Host.fs], fs
226 mov [rdx + r8 + CPUMCPU.Host.gs], gs
227 mov [rdx + r8 + CPUMCPU.Host.ss], ss
228 ; MSRs
229 mov rbx, rdx
230 mov ecx, MSR_K8_FS_BASE
231 rdmsr
232 mov [rbx + r8 + CPUMCPU.Host.FSbase], eax
233 mov [rbx + r8 + CPUMCPU.Host.FSbase + 4], edx
234 mov ecx, MSR_K8_GS_BASE
235 rdmsr
236 mov [rbx + r8 + CPUMCPU.Host.GSbase], eax
237 mov [rbx + r8 + CPUMCPU.Host.GSbase + 4], edx
238 mov ecx, MSR_K6_EFER
239 rdmsr
240 mov [rbx + r8 + CPUMCPU.Host.efer], eax
241 mov [rbx + r8 + CPUMCPU.Host.efer + 4], edx
242 mov ecx, MSR_K6_EFER
243 mov rdx, rbx
244 ; special registers.
245 sldt [rdx + r8 + CPUMCPU.Host.ldtr]
246 sidt [rdx + r8 + CPUMCPU.Host.idtr]
247 sgdt [rdx + r8 + CPUMCPU.Host.gdtr]
248 str [rdx + r8 + CPUMCPU.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
249 ; flags
250 pushf
251 pop qword [rdx + r8 + CPUMCPU.Host.rflags]
252
253%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
254 ; Block Local APIC NMI vectors
255 mov rbx, [rdx + CPUM.pvApicBase]
256 or rbx, rbx
257 jz htg_noapic
258 xor edi, edi
259 mov eax, [rbx + APIC_REG_LVT_LINT0]
260 mov ecx, eax
261 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
262 cmp ecx, APIC_REG_LVT_MODE_NMI
263 jne htg_nolint0
264 or edi, 0x01
265 or eax, APIC_REG_LVT_MASKED
266 mov [rbx + APIC_REG_LVT_LINT0], eax
267 mov eax, [rbx + APIC_REG_LVT_LINT0] ; write completion
268htg_nolint0:
269 mov eax, [rbx + APIC_REG_LVT_LINT1]
270 mov ecx, eax
271 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
272 cmp ecx, APIC_REG_LVT_MODE_NMI
273 jne htg_nolint1
274 or edi, 0x02
275 or eax, APIC_REG_LVT_MASKED
276 mov [rbx + APIC_REG_LVT_LINT1], eax
277 mov eax, [rbx + APIC_REG_LVT_LINT1] ; write completion
278htg_nolint1:
279 mov eax, [rbx + APIC_REG_LVT_PC]
280 mov ecx, eax
281 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
282 cmp ecx, APIC_REG_LVT_MODE_NMI
283 jne htg_nopc
284 or edi, 0x04
285 or eax, APIC_REG_LVT_MASKED
286 mov [rbx + APIC_REG_LVT_PC], eax
287 mov eax, [rbx + APIC_REG_LVT_PC] ; write completion
288htg_nopc:
289 mov eax, [rbx + APIC_REG_VERSION]
290 shr eax, 16
291 cmp al, 5
292 jb htg_notherm
293 mov eax, [rbx + APIC_REG_LVT_THMR]
294 mov ecx, eax
295 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
296 cmp ecx, APIC_REG_LVT_MODE_NMI
297 jne htg_notherm
298 or edi, 0x08
299 or eax, APIC_REG_LVT_MASKED
300 mov [rbx + APIC_REG_LVT_THMR], eax
301 mov eax, [rbx + APIC_REG_LVT_THMR] ; write completion
302htg_notherm:
303 mov [rdx + CPUM.fApicDisVectors], edi
304htg_noapic:
305%endif
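; (Summary of the block above: each LVT entry -- LINT0, LINT1, the performance counter
;  and, if the APIC version reports one, the thermal sensor -- that is programmed for
;  NMI delivery and not already masked gets APIC_REG_LVT_MASKED set, is read back to
;  force write completion, and has a bit recorded in edi/CPUM.fApicDisVectors so the
;  exit path can unmask exactly the same vectors again.)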
306
307 FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
308 ; save MSR_IA32_SYSENTER_CS register.
309 mov rbx, rdx ; save rdx
310 mov ecx, MSR_IA32_SYSENTER_CS
311 rdmsr ; edx:eax <- MSR[ecx]
312 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs], eax
313 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4], edx
314 xor eax, eax ; load 0:0 to cause #GP upon sysenter
315 xor edx, edx
316 wrmsr
317 mov rdx, rbx ; restore rdx
318 jmp short htg_no_sysenter
319
320ALIGNCODE(16)
321htg_no_sysenter:
322
323 ;; handle use flags.
324 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
325 and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
326 mov [rdx + r8 + CPUMCPU.fUseFlags], esi
327
328 ; debug registers.
329 test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
330 jz htg_debug_regs_no
331 jmp htg_debug_regs_save
332htg_debug_regs_no:
333 DEBUG_CHAR('a') ; trashes esi
334
335 ; control registers.
336 mov rax, cr0
337 mov [rdx + r8 + CPUMCPU.Host.cr0], rax
338 ;mov rax, cr2 ; assume the host OS doesn't stuff things in cr2. (safe)
339 ;mov [rdx + r8 + CPUMCPU.Host.cr2], rax
340 mov rax, cr3
341 mov [rdx + r8 + CPUMCPU.Host.cr3], rax
342 mov rax, cr4
343 mov [rdx + r8 + CPUMCPU.Host.cr4], rax
344
345 ;;
346 ;; Start switching to VMM context.
347 ;;
348
349 ;
350 ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
351 ; Also disable WP. (eax==cr4 now)
352 ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
353 ;
354 and rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
355 mov ecx, [rdx + r8 + CPUMCPU.Guest.cr4]
356 DEBUG_CHAR('b') ; trashes esi
357 ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
358 ; in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
359 ; simplify this operation a bit (and improve locality of the data).
360
361 ;
362 ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
363 ; FXSAVE support on the host CPU
364 ;
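; (Net effect of the next few instructions, written out:
;    new cr4 = (host cr4 & (MCE|PSE|PAE)) | (guest cr4 & CR4.AndMask) | CR4.OrMask )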
365 and ecx, [rdx + CPUM.CR4.AndMask]
366 or eax, ecx
367 or eax, [rdx + CPUM.CR4.OrMask]
368 mov cr4, rax
369 DEBUG_CHAR('c') ; trashes esi
370
371 mov eax, [rdx + r8 + CPUMCPU.Guest.cr0]
372 and eax, X86_CR0_EM
373 or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
374 mov cr0, rax
375 DEBUG_CHAR('0') ; trashes esi
376
377
378 ; Load new gdt so we can do far jump to guest code after cr3 reload.
379 lgdt [rdx + r8 + CPUMCPU.Hyper.gdtr]
380 DEBUG_CHAR('1') ; trashes esi
381
382 ; Store the hypervisor cr3 for later loading
383 mov ebp, [rdx + r8 + CPUMCPU.Hyper.cr3]
384
385 ;;
386 ;; Load Intermediate memory context.
387 ;;
388 FIXUP FIX_INTER_AMD64_CR3, 1
389 mov eax, 0ffffffffh
390 mov cr3, rax
391 DEBUG_CHAR('2') ; trashes esi
392
393 ;;
394 ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
395 ;;
396 jmp far [NAME(fpIDEnterTarget) wrt rip]
397
398; 16:32 Pointer to IDEnterTarget.
399NAME(fpIDEnterTarget):
400 FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
401dd 0
402 FIXUP FIX_HYPER_CS, 0
403dd 0
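; (The two dwords above are the memory operand of the 'jmp far' a few lines up: the
;  first is patched with the identity-mapped 32-bit offset of IDEnterTarget, the second
;  with the hypervisor CS selector -- presumably only its low 16 bits are consumed,
;  since the far pointer read from memory here is a 16:32 selector:offset pair.)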
404
405
406;;
407; Detour for saving the host DR7 and DR6.
408; esi and rdx must be preserved.
409htg_debug_regs_save:
410DEBUG_S_CHAR('s');
411 mov rax, dr7 ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
412 mov [rdx + r8 + CPUMCPU.Host.dr7], rax
413 xor eax, eax ; clear everything. (bit 12? is read as 1...)
414 mov dr7, rax
415 mov rax, dr6 ; just in case we save the state register too.
416 mov [rdx + r8 + CPUMCPU.Host.dr6], rax
417 ; save host DR0-3?
418 test esi, CPUM_USE_DEBUG_REGS
419 jz near htg_debug_regs_no
420DEBUG_S_CHAR('S');
421 mov rax, dr0
422 mov [rdx + r8 + CPUMCPU.Host.dr0], rax
423 mov rbx, dr1
424 mov [rdx + r8 + CPUMCPU.Host.dr1], rbx
425 mov rcx, dr2
426 mov [rdx + r8 + CPUMCPU.Host.dr2], rcx
427 mov rax, dr3
428 mov [rdx + r8 + CPUMCPU.Host.dr3], rax
429 jmp htg_debug_regs_no
430
431
432 ; We're now on identity mapped pages, in 32-bit compatibility mode.
433BITS 32
434ALIGNCODE(16)
435GLOBALNAME IDEnterTarget
436 DEBUG_CHAR('3')
437
438 ; 2. Deactivate long mode by turning off paging.
439 mov ebx, cr0
440 and ebx, ~X86_CR0_PG
441 mov cr0, ebx
442 DEBUG_CHAR('4')
443
444 ; 3. Load intermediate page table.
445 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
446 mov edx, 0ffffffffh
447 mov cr3, edx
448
449 ; 4. Disable long mode.
450 ; We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
451 mov ecx, MSR_K6_EFER
452 rdmsr
453 DEBUG_CHAR('5')
454 and eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
455 wrmsr
456 DEBUG_CHAR('6')
457
458%ifndef SWITCHER_TO_PAE
459 ; 4b. Disable PAE.
460 mov eax, cr4
461 and eax, ~X86_CR4_PAE
462 mov cr4, eax
463%else
464%endif
465
466 ; 5. Enable paging.
467 or ebx, X86_CR0_PG
468 mov cr0, ebx
469 jmp short just_a_jump
470just_a_jump:
471 DEBUG_CHAR('7')
472
473 ;;
474 ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
475 ;;
476 FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
477 jmp near NAME(JmpGCTarget)
478
479
480 ;;
481 ;; When we arrive at this label we're at the
482 ;; guest code mapping of the switching code.
483 ;;
484ALIGNCODE(16)
485GLOBALNAME JmpGCTarget
486 DEBUG_CHAR('-')
487;mov eax, 0ffff0000h
488;.delay_loop:
489;nop
490;dec eax
491;nop
492;jnz .delay_loop
493 ; load final cr3 and do far jump to load cs.
494 mov cr3, ebp ; ebp set above
495 DEBUG_CHAR('0')
496
497 ;;
498 ;; We're in VMM MMU context and VMM CS is loaded.
499 ;; Setup the rest of the VMM state.
500 ;;
501 ; Load selectors
502 DEBUG_CHAR('1')
503 FIXUP FIX_HYPER_DS, 1
504 mov eax, 0ffffh
505 mov ds, eax
506 mov es, eax
507 xor eax, eax
508 mov gs, eax
509 mov fs, eax
510 ; Load pCpum into EDX
511 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
512 mov edx, 0ffffffffh
513 ; Activate guest IDT
514 DEBUG_CHAR('2')
515 lidt [edx + CPUMCPU.Hyper.idtr]
516
517 ; Setup stack; use the lss_esp, ss pair for lss
518 DEBUG_CHAR('3')
519 mov eax, [edx + CPUMCPU.Hyper.esp]
520 mov [edx + CPUMCPU.Hyper.lss_esp], eax
521 lss esp, [edx + CPUMCPU.Hyper.lss_esp]
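; (lss loads SS:ESP in a single instruction from a 16:32 far pointer in memory;
;  Hyper.lss_esp is presumably laid out so that Hyper.ss immediately follows it, so
;  copying esp there first gives lss a proper operand and avoids any window with a
;  mismatched stack segment and pointer.)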
522
523 ; Restore TSS selector; must mark it as not busy before using ltr (!)
524 DEBUG_CHAR('4')
525 FIXUP FIX_GC_TSS_GDTE_DW2, 2
526 and dword [0ffffffffh], ~0200h ; clear busy flag (2nd type2 bit)
527 DEBUG_CHAR('5')
528 ltr word [edx + CPUMCPU.Hyper.tr]
529 DEBUG_CHAR('6')
530
531 ; Activate the ldt (now we can safely crash).
532 lldt [edx + CPUMCPU.Hyper.ldtr]
533 DEBUG_CHAR('7')
534
535 ;; use flags.
536 mov esi, [edx + CPUMCPU.fUseFlags]
537
538 ; debug registers
539 test esi, CPUM_USE_DEBUG_REGS
540 jz htg_debug_regs_guest_no
541 jmp htg_debug_regs_guest
542htg_debug_regs_guest_no:
543 DEBUG_CHAR('9')
544
545 ; General registers.
546 mov ebx, [edx + CPUMCPU.Hyper.ebx]
547 mov ebp, [edx + CPUMCPU.Hyper.ebp]
548 mov esi, [edx + CPUMCPU.Hyper.esi]
549 mov edi, [edx + CPUMCPU.Hyper.edi]
550 push dword [edx + CPUMCPU.Hyper.eflags]
551 popfd
552 DEBUG_CHAR('!')
553
554 ;;
555 ;; Return to the VMM code which either called the switcher or
556 ;; the code set up to run by HC.
557 ;;
558%ifdef DEBUG_STUFF
559 COM32_S_PRINT ';eip='
560 mov eax, [edx + CPUMCPU.Hyper.eip]
561 COM32_S_DWORD_REG eax
562 COM32_S_CHAR ';'
563%endif
564 mov eax, [edx + CPUMCPU.Hyper.eip]
565 ; callees expect CPUM ptr
566 CPUM_FROM_CPUMCPU(edx)
567
568%ifdef VBOX_WITH_STATISTICS
569 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
570 mov edx, 0ffffffffh
571 STAM32_PROFILE_ADV_STOP edx
572 FIXUP FIX_GC_CPUM_OFF, 1, 0
573 mov edx, 0ffffffffh
574%endif
575 jmp eax
576
577;;
578; Detour for saving host DR0-3 and loading hypervisor debug registers.
579; esi and edx must be preserved.
580htg_debug_regs_guest:
581 DEBUG_S_CHAR('D')
582 DEBUG_S_CHAR('R')
583 DEBUG_S_CHAR('x')
584 ; load hyper DR0-7
585 mov ebx, [edx + CPUMCPU.Hyper.dr]
586 mov dr0, ebx
587 mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
588 mov dr1, ecx
589 mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
590 mov dr2, eax
591 mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
592 mov dr3, ebx
593 ;mov eax, [edx + CPUMCPU.Hyper.dr + 8*6]
594 mov ecx, 0ffff0ff0h
595 mov dr6, ecx
596 mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
597 mov dr7, eax
598 jmp htg_debug_regs_guest_no
599
600ENDPROC vmmR0HostToGuestAsm
601
602
603;;
604; Trampoline for doing a call when starting the hypervisor execution.
605;
606; Push any arguments to the routine.
607; Push the argument frame size (cArg * 4).
608; Push the call target (_cdecl convention).
609; Push the address of this routine.
610;
611;
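; (As seen from the pops below, the hyper stack is expected to look like this on entry --
;  a hypothetical two-argument call sketched for illustration:
;      [esp]      target routine address   (popped into eax and called)
;      [esp+4]    frame size, e.g. 2*4 = 8 (popped into edi, added to esp afterwards)
;      [esp+8]    arg0, arg1, ...          (cdecl arguments for the target) )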
612ALIGNCODE(16)
613BEGINPROC vmmGCCallTrampoline
614%ifdef DEBUG_STUFF
615 COM32_S_CHAR 'c'
616 COM32_S_CHAR 't'
617 COM32_S_CHAR '!'
618%endif
619
620 ; call routine
621 pop eax ; call address
622 mov esi, edx ; save edx
623 pop edi ; argument count.
624%ifdef DEBUG_STUFF
625 COM32_S_PRINT ';eax='
626 COM32_S_DWORD_REG eax
627 COM32_S_CHAR ';'
628%endif
629 call eax ; do call
630 add esp, edi ; cleanup stack
631
632 ; return to the host context.
633 push byte 0 ; eip
634 mov edx, esi ; CPUM pointer
635
636%ifdef DEBUG_STUFF
637 COM32_S_CHAR '`'
638%endif
639 jmp NAME(VMMGCGuestToHostAsm) ; eax = returncode.
640ENDPROC vmmGCCallTrampoline
641
642
643
644;;
645; The C interface.
646;
647ALIGNCODE(16)
648BEGINPROC vmmGCGuestToHost
649%ifdef DEBUG_STUFF
650 push esi
651 COM_NEWLINE
652 DEBUG_CHAR('b')
653 DEBUG_CHAR('a')
654 DEBUG_CHAR('c')
655 DEBUG_CHAR('k')
656 DEBUG_CHAR('!')
657 COM_NEWLINE
658 pop esi
659%endif
660 mov eax, [esp + 4]
661 jmp NAME(VMMGCGuestToHostAsm)
662ENDPROC vmmGCGuestToHost
663
664
665;;
666; VMMGCGuestToHostAsmGuestCtx
667;
668; Switches from Guest Context to Host Context.
669; Of course it's only called from within the GC.
670;
671; @param eax Return code.
672; @param esp + 4 Pointer to CPUMCTXCORE.
673;
674; @remark ASSUMES interrupts disabled.
675;
676ALIGNCODE(16)
677BEGINPROC VMMGCGuestToHostAsmGuestCtx
678 DEBUG_CHAR('~')
679
680%ifdef VBOX_WITH_STATISTICS
681 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
682 mov edx, 0ffffffffh
683 STAM32_PROFILE_ADV_STOP edx
684
685 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
686 mov edx, 0ffffffffh
687 STAM32_PROFILE_ADV_START edx
688
689 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
690 mov edx, 0ffffffffh
691 STAM32_PROFILE_ADV_START edx
692%endif
693
694 ;
695 ; Load the CPUMCPU pointer.
696 ;
697 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
698 mov edx, 0ffffffffh
699
700 ; Skip return address (assumes called!)
701 lea esp, [esp + 4]
702
703 ;
704 ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
705 ;
706 ; general purpose registers
707 push eax ; save return code.
708 mov eax, [esp + 4 + CPUMCTXCORE.edi]
709 mov [edx + CPUMCPU.Guest.edi], eax
710 mov eax, [esp + 4 + CPUMCTXCORE.esi]
711 mov [edx + CPUMCPU.Guest.esi], eax
712 mov eax, [esp + 4 + CPUMCTXCORE.ebp]
713 mov [edx + CPUMCPU.Guest.ebp], eax
714 mov eax, [esp + 4 + CPUMCTXCORE.eax]
715 mov [edx + CPUMCPU.Guest.eax], eax
716 mov eax, [esp + 4 + CPUMCTXCORE.ebx]
717 mov [edx + CPUMCPU.Guest.ebx], eax
718 mov eax, [esp + 4 + CPUMCTXCORE.edx]
719 mov [edx + CPUMCPU.Guest.edx], eax
720 mov eax, [esp + 4 + CPUMCTXCORE.ecx]
721 mov [edx + CPUMCPU.Guest.ecx], eax
722 mov eax, [esp + 4 + CPUMCTXCORE.esp]
723 mov [edx + CPUMCPU.Guest.esp], eax
724 ; selectors
725 mov eax, [esp + 4 + CPUMCTXCORE.ss]
726 mov [edx + CPUMCPU.Guest.ss], eax
727 mov eax, [esp + 4 + CPUMCTXCORE.gs]
728 mov [edx + CPUMCPU.Guest.gs], eax
729 mov eax, [esp + 4 + CPUMCTXCORE.fs]
730 mov [edx + CPUMCPU.Guest.fs], eax
731 mov eax, [esp + 4 + CPUMCTXCORE.es]
732 mov [edx + CPUMCPU.Guest.es], eax
733 mov eax, [esp + 4 + CPUMCTXCORE.ds]
734 mov [edx + CPUMCPU.Guest.ds], eax
735 mov eax, [esp + 4 + CPUMCTXCORE.cs]
736 mov [edx + CPUMCPU.Guest.cs], eax
737 ; flags
738 mov eax, [esp + 4 + CPUMCTXCORE.eflags]
739 mov [edx + CPUMCPU.Guest.eflags], eax
740 ; eip
741 mov eax, [esp + 4 + CPUMCTXCORE.eip]
742 mov [edx + CPUMCPU.Guest.eip], eax
743 ; jump to common worker code.
744 pop eax ; restore return code.
745
746 add esp, CPUMCTXCORE_size ; skip CPUMCTXCORE structure
747
748 jmp vmmGCGuestToHostAsm_EIPDone
749ENDPROC VMMGCGuestToHostAsmGuestCtx
750
751
752;;
753; VMMGCGuestToHostAsmHyperCtx
754;
755; This is an alternative entry point which we'll be using
756; when we have the hypervisor context and need to save
757; that before going to the host.
758;
759; This is typically useful when abandoning the hypervisor
760; because of a trap and we want the trap state to be saved.
761;
762; @param eax Return code.
763; @param ecx Points to CPUMCTXCORE.
764; @uses eax,edx,ecx
765ALIGNCODE(16)
766BEGINPROC VMMGCGuestToHostAsmHyperCtx
767 DEBUG_CHAR('#')
768
769%ifdef VBOX_WITH_STATISTICS
770 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
771 mov edx, 0ffffffffh
772 STAM32_PROFILE_ADV_STOP edx
773
774 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
775 mov edx, 0ffffffffh
776 STAM32_PROFILE_ADV_START edx
777
778 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
779 mov edx, 0ffffffffh
780 STAM32_PROFILE_ADV_START edx
781%endif
782
783 ;
784 ; Load the CPUM pointer.
785 ;
786 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
787 mov edx, 0ffffffffh
788
789 push eax ; save return code.
790 ; general purpose registers
791 mov eax, [ecx + CPUMCTXCORE.edi]
792 mov [edx + CPUMCPU.Hyper.edi], eax
793 mov eax, [ecx + CPUMCTXCORE.esi]
794 mov [edx + CPUMCPU.Hyper.esi], eax
795 mov eax, [ecx + CPUMCTXCORE.ebp]
796 mov [edx + CPUMCPU.Hyper.ebp], eax
797 mov eax, [ecx + CPUMCTXCORE.eax]
798 mov [edx + CPUMCPU.Hyper.eax], eax
799 mov eax, [ecx + CPUMCTXCORE.ebx]
800 mov [edx + CPUMCPU.Hyper.ebx], eax
801 mov eax, [ecx + CPUMCTXCORE.edx]
802 mov [edx + CPUMCPU.Hyper.edx], eax
803 mov eax, [ecx + CPUMCTXCORE.ecx]
804 mov [edx + CPUMCPU.Hyper.ecx], eax
805 mov eax, [ecx + CPUMCTXCORE.esp]
806 mov [edx + CPUMCPU.Hyper.esp], eax
807 ; selectors
808 mov eax, [ecx + CPUMCTXCORE.ss]
809 mov [edx + CPUMCPU.Hyper.ss], eax
810 mov eax, [ecx + CPUMCTXCORE.gs]
811 mov [edx + CPUMCPU.Hyper.gs], eax
812 mov eax, [ecx + CPUMCTXCORE.fs]
813 mov [edx + CPUMCPU.Hyper.fs], eax
814 mov eax, [ecx + CPUMCTXCORE.es]
815 mov [edx + CPUMCPU.Hyper.es], eax
816 mov eax, [ecx + CPUMCTXCORE.ds]
817 mov [edx + CPUMCPU.Hyper.ds], eax
818 mov eax, [ecx + CPUMCTXCORE.cs]
819 mov [edx + CPUMCPU.Hyper.cs], eax
820 ; flags
821 mov eax, [ecx + CPUMCTXCORE.eflags]
822 mov [edx + CPUMCPU.Hyper.eflags], eax
823 ; eip
824 mov eax, [ecx + CPUMCTXCORE.eip]
825 mov [edx + CPUMCPU.Hyper.eip], eax
826 ; jump to common worker code.
827 pop eax ; restore return code.
828 jmp vmmGCGuestToHostAsm_SkipHyperRegs
829
830ENDPROC VMMGCGuestToHostAsmHyperCtx
831
832
833;;
834; VMMGCGuestToHostAsm
835;
836; This is an alternative entry point which we'll be using
837; when we have already saved the guest state or we haven't
838; been messing with the guest at all.
839;
840; @param eax Return code.
841; @uses eax, edx, ecx (or it may use them in the future)
842;
843ALIGNCODE(16)
844BEGINPROC VMMGCGuestToHostAsm
845 DEBUG_CHAR('%')
846
847%ifdef VBOX_WITH_STATISTICS
848 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
849 mov edx, 0ffffffffh
850 STAM32_PROFILE_ADV_STOP edx
851
852 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
853 mov edx, 0ffffffffh
854 STAM32_PROFILE_ADV_START edx
855
856 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
857 mov edx, 0ffffffffh
858 STAM32_PROFILE_ADV_START edx
859%endif
860
861 ;
862 ; Load the CPUM pointer.
863 ;
864 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
865 mov edx, 0ffffffffh
866
867 pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
868 jmp short vmmGCGuestToHostAsm_EIPDone
869
870ALIGNCODE(16)
871vmmGCGuestToHostAsm_EIPDone:
872 ; general registers which we care about.
873 mov dword [edx + CPUMCPU.Hyper.ebx], ebx
874 mov dword [edx + CPUMCPU.Hyper.esi], esi
875 mov dword [edx + CPUMCPU.Hyper.edi], edi
876 mov dword [edx + CPUMCPU.Hyper.ebp], ebp
877 mov dword [edx + CPUMCPU.Hyper.esp], esp
878
879 ; special registers which may change.
880vmmGCGuestToHostAsm_SkipHyperRegs:
881%ifdef STRICT_IF
882 pushf
883 pop ecx
884 test ecx, X86_EFL_IF
885 jz .if_clear_out
886 mov eax, 0c0ffee01h
887 cli
888.if_clear_out:
889%endif
890 ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
891 sldt [edx + CPUMCPU.Hyper.ldtr]
892
893 ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
894; The FPU context is saved before the host cr0 is restored, in (another) branch below.
895
896
897 ;;
898 ;; Load Intermediate memory context.
899 ;;
900 mov edi, eax ; save return code in EDI (careful with COM_DWORD_REG from here on!)
901 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
902 mov eax, 0ffffffffh
903 mov cr3, eax
904 DEBUG_CHAR('?')
905
906 ;; We're now in intermediate memory context!
907
908 ;;
909 ;; 0. Jump to identity mapped location
910 ;;
911 FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
912 jmp near NAME(IDExitTarget)
913
914 ; We're now on identity mapped pages!
915ALIGNCODE(16)
916GLOBALNAME IDExitTarget
917 DEBUG_CHAR('1')
918
919 ; 1. Disable paging.
920 mov ebx, cr0
921 and ebx, ~X86_CR0_PG
922 mov cr0, ebx
923 DEBUG_CHAR('2')
924
925 ; 2. Enable PAE.
926%ifdef SWITCHER_TO_PAE
927 ; - already enabled
928%else
929 mov ecx, cr4
930 or ecx, X86_CR4_PAE
931 mov cr4, ecx
932%endif
933
934 ; 3. Load long mode intermediate CR3.
935 FIXUP FIX_INTER_AMD64_CR3, 1
936 mov ecx, 0ffffffffh
937 mov cr3, ecx
938 DEBUG_CHAR('3')
939
940 ; 4. Enable long mode.
941 mov ebp, edx
942 mov ecx, MSR_K6_EFER
943 rdmsr
944 or eax, MSR_K6_EFER_LME
945 wrmsr
946 mov edx, ebp
947 DEBUG_CHAR('4')
948
949 ; 5. Enable paging.
950 or ebx, X86_CR0_PG
951 mov cr0, ebx
952 DEBUG_CHAR('5')
953
954 ; Jump from compatibility mode to 64-bit mode.
955 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
956 jmp 0ffffh:0fffffffeh
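; (The 0ffffh:0fffffffeh operands above are placeholders; the FIX_ID_FAR32_TO_64BIT_MODE
;  fixup presumably patches in the 64-bit code selector and the identity-mapped offset of
;  IDExit64Mode, so this far jump is what actually switches the CPU into 64-bit mode.)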
957
958 ;
959 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
960 ; Move on to the HC mapping.
961 ;
962BITS 64
963ALIGNCODE(16)
964NAME(IDExit64Mode):
965 DEBUG_CHAR('6')
966 jmp [NAME(pHCExitTarget) wrt rip]
967
968; 64-bit jump target
969NAME(pHCExitTarget):
970FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
971dq 0ffffffffffffffffh
972
973; 64-bit pCpum address.
974NAME(pCpumHC):
975FIXUP FIX_HC_64BIT_CPUM, 0
976dq 0ffffffffffffffffh
977
978 ;
979 ; When we arrive here we're at the host context
980 ; mapping of the switcher code.
981 ;
982ALIGNCODE(16)
983GLOBALNAME HCExitTarget
984 DEBUG_CHAR('9')
985
986 ; Clear high dword of the CPUMCPU pointer
987 and rdx, 0ffffffffh
988
989 ; load final cr3
990 mov rsi, [rdx + CPUMCPU.Host.cr3]
991 mov cr3, rsi
992 DEBUG_CHAR('@')
993
994 ;;
995 ;; Restore Host context.
996 ;;
997 ; Load CPUM pointer into edx
998 mov rdx, [NAME(pCpumHC) wrt rip]
999 ; Load the CPUMCPU offset.
1000 mov r8d, [rdx + CPUM.offCPUMCPU0]
1001
1002 ; activate host gdt and idt
1003 lgdt [rdx + r8 + CPUMCPU.Host.gdtr]
1004 DEBUG_CHAR('0')
1005 lidt [rdx + r8 + CPUMCPU.Host.idtr]
1006 DEBUG_CHAR('1')
1007 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1008%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1009 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
1010 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1011 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1012 and dword [rax + 4], ~0200h ; clear busy flag (2nd type2 bit)
1013 ltr word [rdx + r8 + CPUMCPU.Host.tr]
1014%else
1015 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
1016 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1017 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1018 mov ecx, [rax + 4] ; ecx <- 2nd descriptor dword
1019 mov ebx, ecx ; save original value
1020 and ecx, ~0200h ; clear busy flag (2nd type2 bit)
1021 mov [rax + 4], ecx ; not using xchg here is paranoia..
1022 ltr word [rdx + r8 + CPUMCPU.Host.tr]
1023 xchg [rax + 4], ebx ; using xchg is paranoia too...
1024%endif
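; (Background for the block above: ltr raises #GP if the referenced TSS descriptor is
;  already marked busy, and the host TSS is busy because it was in use before the switch;
;  hence the busy bit -- bit 1 of the type field, i.e. bit 9 of the descriptor's second
;  dword -- is cleared in the GDT entry before TR is reloaded.)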
1025 ; activate ldt
1026 DEBUG_CHAR('2')
1027 lldt [rdx + r8 + CPUMCPU.Host.ldtr]
1028 ; Restore segment registers
1029 mov eax, [rdx + r8 + CPUMCPU.Host.ds]
1030 mov ds, eax
1031 mov eax, [rdx + r8 + CPUMCPU.Host.es]
1032 mov es, eax
1033 mov eax, [rdx + r8 + CPUMCPU.Host.fs]
1034 mov fs, eax
1035 mov eax, [rdx + r8 + CPUMCPU.Host.gs]
1036 mov gs, eax
1037 ; restore stack
1038 mov eax, [rdx + r8 + CPUMCPU.Host.ss]
1039 mov ss, eax
1040 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
1041
1042 FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
1043 ; restore MSR_IA32_SYSENTER_CS register.
1044 mov rbx, rdx ; save rdx
1045 mov ecx, MSR_IA32_SYSENTER_CS
1046 mov eax, [rbx + r8 + CPUMCPU.Host.SysEnter.cs]
1047 mov edx, [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4]
1048 wrmsr ; MSR[ecx] <- edx:eax
1049 mov rdx, rbx ; restore rdx
1050 jmp short gth_sysenter_no
1051
1052ALIGNCODE(16)
1053gth_sysenter_no:
1054
1055 ;; @todo AMD syscall
1056
1057 ; Restore FPU if guest has used it.
1058; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
1059 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
1060 test esi, CPUM_USED_FPU
1061 jz short gth_fpu_no
1062 mov rcx, cr0
1063 and rcx, ~(X86_CR0_TS | X86_CR0_EM)
1064 mov cr0, rcx
1065
1066 fxsave [rdx + r8 + CPUMCPU.Guest.fpu]
1067 fxrstor [rdx + r8 + CPUMCPU.Host.fpu]
1068 jmp short gth_fpu_no
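; (CR0.TS and CR0.EM are cleared just above because fxsave/fxrstor would otherwise raise
;  #NM/#UD; the guest FPU state is saved and the host state restored in one go while the
;  FPU registers still hold the guest's values.)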
1069
1070ALIGNCODE(16)
1071gth_fpu_no:
1072
1073 ; Control registers.
1074 ; Would've liked to have these higher up in case of crashes, but
1075 ; the fpu stuff must be done before we restore cr0.
1076 mov rcx, [rdx + r8 + CPUMCPU.Host.cr4]
1077 mov cr4, rcx
1078 mov rcx, [rdx + r8 + CPUMCPU.Host.cr0]
1079 mov cr0, rcx
1080 ;mov rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumes this is waste of time.
1081 ;mov cr2, rcx
1082
1083 ; restore debug registers (if modified) (esi must still be fUseFlags!)
1084 ; (must be done after cr4 reload because of the debug extension.)
1085 test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
1086 jz short gth_debug_regs_no
1087 jmp gth_debug_regs_restore
1088gth_debug_regs_no:
1089
1090 ; Restore MSRs
1091 mov rbx, rdx
1092 mov ecx, MSR_K8_FS_BASE
1093 mov eax, [rbx + r8 + CPUMCPU.Host.FSbase]
1094 mov edx, [rbx + r8 + CPUMCPU.Host.FSbase + 4]
1095 wrmsr
1096 mov ecx, MSR_K8_GS_BASE
1097 mov eax, [rbx + r8 + CPUMCPU.Host.GSbase]
1098 mov edx, [rbx + r8 + CPUMCPU.Host.GSbase + 4]
1099 wrmsr
1100 mov ecx, MSR_K6_EFER
1101 mov eax, [rbx + r8 + CPUMCPU.Host.efer]
1102 mov edx, [rbx + r8 + CPUMCPU.Host.efer + 4]
1103 wrmsr
1104 mov rdx, rbx
1105
1106%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1107 ; Unblock Local APIC NMI vectors
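; (fApicDisVectors holds the bits recorded on the way in -- 0x01 LINT0, 0x02 LINT1,
;  0x04 performance counter, 0x08 thermal -- and each 'shr ecx, 1' below moves the next
;  bit into CF, so only the vectors we masked ourselves get unmasked again.)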
1108 mov ecx, [rdx + CPUM.fApicDisVectors]
1109 mov rbx, [rdx + CPUM.pvApicBase]
1110 shr ecx, 1
1111 jnc gth_nolint0
1112 and dword [rbx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
1113gth_nolint0:
1114 shr ecx, 1
1115 jnc gth_nolint1
1116 and dword [rbx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
1117gth_nolint1:
1118 shr ecx, 1
1119 jnc gth_nopc
1120 and dword [rbx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
1121gth_nopc:
1122 shr ecx, 1
1123 jnc gth_notherm
1124 and dword [rbx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
1125gth_notherm:
1126%endif
1127
1128 ; restore general registers.
1129 mov eax, edi ; restore return code. eax = return code !!
1130 ; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
1131 mov rbx, [rdx + r8 + CPUMCPU.Host.rbx]
1132 ; mov rcx, [rdx + r8 + CPUMCPU.Host.rcx] - scratch
1133 ; mov rdx, [rdx + r8 + CPUMCPU.Host.rdx] - scratch
1134 mov rdi, [rdx + r8 + CPUMCPU.Host.rdi]
1135 mov rsi, [rdx + r8 + CPUMCPU.Host.rsi]
1136 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
1137 mov rbp, [rdx + r8 + CPUMCPU.Host.rbp]
1138 ; mov r8, [rdx + r8 + CPUMCPU.Host.r8 ] - scratch
1139 ; mov r9, [rdx + r8 + CPUMCPU.Host.r9 ] - scratch
1140 mov r10, [rdx + r8 + CPUMCPU.Host.r10]
1141 mov r11, [rdx + r8 + CPUMCPU.Host.r11]
1142 mov r12, [rdx + r8 + CPUMCPU.Host.r12]
1143 mov r13, [rdx + r8 + CPUMCPU.Host.r13]
1144 mov r14, [rdx + r8 + CPUMCPU.Host.r14]
1145 mov r15, [rdx + r8 + CPUMCPU.Host.r15]
1146
1147 ; finally restore flags. (probably not required)
1148 push qword [rdx + r8 + CPUMCPU.Host.rflags]
1149 popf
1150
1151
1152%ifdef DEBUG_STUFF
1153 COM64_S_CHAR '4'
1154%endif
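; (db 048h is a REX.W prefix: it turns the retf below into a 64-bit far return, popping
;  the 8-byte RIP and 8-byte CS pushed by the 'push rax / call' sequence in
;  vmmR0HostToGuest -- presumably spelled out as a raw byte because the assembler would
;  not emit the prefix on retf directly.)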
1155 db 048h
1156 retf
1157
1158;;
1159; Detour for restoring the host debug registers.
1160; edx and edi must be preserved.
1161gth_debug_regs_restore:
1162 DEBUG_S_CHAR('d')
1163 xor eax, eax
1164 mov dr7, rax ; paranoia or not?
1165 test esi, CPUM_USE_DEBUG_REGS
1166 jz short gth_debug_regs_dr7
1167 DEBUG_S_CHAR('r')
1168 mov rax, [rdx + r8 + CPUMCPU.Host.dr0]
1169 mov dr0, rax
1170 mov rbx, [rdx + r8 + CPUMCPU.Host.dr1]
1171 mov dr1, rbx
1172 mov rcx, [rdx + r8 + CPUMCPU.Host.dr2]
1173 mov dr2, rcx
1174 mov rax, [rdx + r8 + CPUMCPU.Host.dr3]
1175 mov dr3, rax
1176gth_debug_regs_dr7:
1177 mov rbx, [rdx + r8 + CPUMCPU.Host.dr6]
1178 mov dr6, rbx
1179 mov rcx, [rdx + r8 + CPUMCPU.Host.dr7]
1180 mov dr7, rcx
1181 jmp gth_debug_regs_no
1182
1183ENDPROC VMMGCGuestToHostAsm
1184
1185
1186GLOBALNAME End
1187;
1188; The description string (in the text section).
1189;
1190NAME(Description):
1191 db SWITCHER_DESCRIPTION
1192 db 0
1193
1194extern NAME(Relocate)
1195
1196;
1197; End the fixup records.
1198;
1199BEGINDATA
1200 db FIX_THE_END ; final entry.
1201GLOBALNAME FixupsEnd
1202
1203;;
1204; The switcher definition structure.
1205ALIGNDATA(16)
1206GLOBALNAME Def
1207 istruc VMMSWITCHERDEF
1208 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1209 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1210 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1211 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1212 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1213 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1214 at VMMSWITCHERDEF.offR0HostToGuest, dd NAME(vmmR0HostToGuest) - NAME(Start)
1215 at VMMSWITCHERDEF.offGCGuestToHost, dd NAME(vmmGCGuestToHost) - NAME(Start)
1216 at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
1217 at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
1218 at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx)- NAME(Start)
1219 at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx)- NAME(Start)
1220 ; disasm help
1221 at VMMSWITCHERDEF.offHCCode0, dd 0
1222 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1223 at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
1224 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
1225 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1226 at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
1227 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1228 at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
1229 at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
1230 at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)
1231
1232 iend
1233