VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac@47764

Last change on this file since 47764 was 47660, checked in by vboxsync, 12 years ago

VMM: Debug register handling redo. (only partly tested on AMD-V so far.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 36.9 KB
1; $Id: LegacyandAMD64.mac 47660 2013-08-12 00:37:34Z vboxsync $
2;; @file
3; VMM - World Switchers, 32-bit to AMD64 intermediate context.
4;
5; This is used for running a 64-bit guest on 32-bit hosts, not for
6; normal raw-mode. All the code involved is contained in this
7; file.
8;
9
10;
11; Copyright (C) 2006-2013 Oracle Corporation
12;
13; This file is part of VirtualBox Open Source Edition (OSE), as
14; available from http://www.virtualbox.org. This file is free software;
15; you can redistribute it and/or modify it under the terms of the GNU
16; General Public License (GPL) as published by the Free Software
17; Foundation, in version 2 as it comes in the "COPYING" file of the
18; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20;
21
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26;; @note These values are from the HM64ON32OP enum in hm.h.
27%define HM64ON32OP_VMXRCStartVM64 1
28%define HM64ON32OP_SVMRCVMRun64 2
29%define HM64ON32OP_HMRCSaveGuestFPU64 3
30%define HM64ON32OP_HMRCSaveGuestDebug64 4
31%define HM64ON32OP_HMRCTestSwitcher64 5
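;
; For reference, a minimal C-level sketch of the enum these values must stay
; in sync with. This is an assumption for illustration only; the authoritative
; definition is the HM64ON32OP enum in hm.h.
;
;   typedef enum HM64ON32OP
;   {
;       HM64ON32OP_INVALID = 0,
;       HM64ON32OP_VMXRCStartVM64,       /* 1 */
;       HM64ON32OP_SVMRCVMRun64,         /* 2 */
;       HM64ON32OP_HMRCSaveGuestFPU64,   /* 3 */
;       HM64ON32OP_HMRCSaveGuestDebug64, /* 4 */
;       HM64ON32OP_HMRCTestSwitcher64    /* 5 */
;   } HM64ON32OP;
;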
32
33;; Stubs for making OS/2 compile (though not actually work).
34%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
35 %macro vmwrite 2,
36 int3
37 %endmacro
38 %define vmlaunch int3
39 %define vmresume int3
40 %define vmsave int3
41 %define vmload int3
42 %define vmrun int3
43 %define clgi int3
44 %define stgi int3
45 %macro invlpga 2,
46 int3
47 %endmacro
48%endif
49
50;; Debug options
51;%define DEBUG_STUFF 1
52;%define STRICT_IF 1
53
54
55;*******************************************************************************
56;* Header Files *
57;*******************************************************************************
58%include "VBox/asmdefs.mac"
59%include "iprt/x86.mac"
60%include "VBox/err.mac"
61%include "VBox/apic.mac"
62
63%include "VBox/vmm/cpum.mac"
64%include "VBox/vmm/stam.mac"
65%include "VBox/vmm/vm.mac"
66%include "VBox/vmm/hm_vmx.mac"
67%include "CPUMInternal.mac"
68%include "HMInternal.mac"
69%include "VMMSwitcher.mac"
70
71
72;
73; Start the fixup records
74; We collect the fixups in the .data section as we go along
75; It is therefore VITAL that no-one is using the .data section
76; for anything else between 'Start' and 'End'.
77;
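;
; Conceptually, every FIXUP invocation below emits a small record (a type byte
; plus one or more offsets/operands) into this .data run, and the Relocate
; callback walks the records at load time to patch the code. A rough C-level
; sketch of such a record, purely as an assumption about its shape (the real
; layout is defined by the FIXUP macro in VMMSwitcher.mac):
;
;   /* hypothetical record shape */
;   typedef struct SWITCHERFIXUPREC
;   {
;       uint8_t  u8Type;     /* FIX_* type                                  */
;       uint32_t offSource;  /* offset of the patch site within [Start,End) */
;       /* optional type-specific operands follow */
;   } SWITCHERFIXUPREC;
;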
78BEGINDATA
79GLOBALNAME Fixups
80
81
82
83BEGINCODE
84GLOBALNAME Start
85
86BITS 32
87
88;;
89; The C interface.
90; @param [esp + 04h] Param 1 - VM handle
91; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
92; structure for the calling EMT.
93;
94BEGINPROC vmmR0ToRawMode
95%ifdef DEBUG_STUFF
96 COM32_S_NEWLINE
97 COM32_S_CHAR '^'
98%endif
99
100%ifdef VBOX_WITH_STATISTICS
101 ;
102 ; Switcher stats.
103 ;
104 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
105 mov edx, 0ffffffffh
106 STAM_PROFILE_ADV_START edx
107%endif
108
109 push ebp
110 mov ebp, [esp + 12] ; CPUMCPU offset
111
112 ; turn off interrupts
113 pushf
114 cli
115
116 ;
117 ; Call worker.
118 ;
119 FIXUP FIX_HC_CPUM_OFF, 1, 0
120 mov edx, 0ffffffffh
121 push cs ; allow for far return and restore cs correctly.
122 call NAME(vmmR0ToRawModeAsm)
123
124%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
125 CPUM_FROM_CPUMCPU(edx)
126 ; Restore blocked Local APIC NMI vectors
127 mov ecx, [edx + CPUM.fApicDisVectors]
128 mov edx, [edx + CPUM.pvApicBase]
129 shr ecx, 1
130 jnc gth_nolint0
131 and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
132gth_nolint0:
133 shr ecx, 1
134 jnc gth_nolint1
135 and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
136gth_nolint1:
137 shr ecx, 1
138 jnc gth_nopc
139 and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
140gth_nopc:
141 shr ecx, 1
142 jnc gth_notherm
143 and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
144gth_notherm:
145%endif
146
147 ; restore original flags
148 popf
149 pop ebp
150
151%ifdef VBOX_WITH_STATISTICS
152 ;
153 ; Switcher stats.
154 ;
155 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
156 mov edx, 0ffffffffh
157 STAM_PROFILE_ADV_STOP edx
158%endif
159
160 ret
161
162ENDPROC vmmR0ToRawMode
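;
; A hedged sketch of how the ring-0 C side might declare this entry point. The
; prototype below is an illustrative assumption (only the stack layout in the
; doc comment above is authoritative); DECLASM and PVM are the usual
; IPRT/VBox types:
;
;   /* [esp+04h] = pVM, [esp+08h] = offset from VM::cpum to the caller's CPUMCPU */
;   DECLASM(int) vmmR0ToRawMode(PVM pVM, uint32_t offCpumCpu);
;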
163
164; *****************************************************************************
165; vmmR0ToRawModeAsm
166;
167; Phase one of the switch from host to guest context (host MMU context)
168;
169; INPUT:
170; - edx virtual address of CPUM structure (valid in host context)
171; - ebp offset of the CPUMCPU structure relative to CPUM.
172;
173; USES/DESTROYS:
174; - eax, ecx, edx, esi
175;
176; ASSUMPTION:
177; - current CS and DS selectors are wide open
178;
179; *****************************************************************************
180ALIGNCODE(16)
181BEGINPROC vmmR0ToRawModeAsm
182 ;;
183 ;; Save CPU host context
184 ;; Skip eax, edx and ecx as these are not preserved over calls.
185 ;;
186 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
187%ifdef VBOX_WITH_CRASHDUMP_MAGIC
188 ; phys address of scratch page
189 mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
190 mov cr2, eax
191
192 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
193%endif
194
195 ; general registers.
196 mov [edx + CPUMCPU.Host.ebx], ebx
197 mov [edx + CPUMCPU.Host.edi], edi
198 mov [edx + CPUMCPU.Host.esi], esi
199 mov [edx + CPUMCPU.Host.esp], esp
200 mov [edx + CPUMCPU.Host.ebp], ebp
201 ; selectors.
202 mov [edx + CPUMCPU.Host.ds], ds
203 mov [edx + CPUMCPU.Host.es], es
204 mov [edx + CPUMCPU.Host.fs], fs
205 mov [edx + CPUMCPU.Host.gs], gs
206 mov [edx + CPUMCPU.Host.ss], ss
207 ; special registers.
208 DEBUG32_S_CHAR('s')
209 DEBUG32_S_CHAR(';')
210 sldt [edx + CPUMCPU.Host.ldtr]
211 sidt [edx + CPUMCPU.Host.idtr]
212 sgdt [edx + CPUMCPU.Host.gdtr]
213 str [edx + CPUMCPU.Host.tr]
214
215%ifdef VBOX_WITH_CRASHDUMP_MAGIC
216 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
217%endif
218
219%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
220 DEBUG32_S_CHAR('f')
221 DEBUG32_S_CHAR(';')
222 CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
223 mov ebx, [edx + CPUM.pvApicBase]
224 or ebx, ebx
225 jz htg_noapic
226 mov eax, [ebx + APIC_REG_LVT_LINT0]
227 mov ecx, eax
228 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
229 cmp ecx, APIC_REG_LVT_MODE_NMI
230 jne htg_nolint0
231 or edi, 0x01
232 or eax, APIC_REG_LVT_MASKED
233 mov [ebx + APIC_REG_LVT_LINT0], eax
234 mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
235htg_nolint0:
236 mov eax, [ebx + APIC_REG_LVT_LINT1]
237 mov ecx, eax
238 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
239 cmp ecx, APIC_REG_LVT_MODE_NMI
240 jne htg_nolint1
241 or edi, 0x02
242 or eax, APIC_REG_LVT_MASKED
243 mov [ebx + APIC_REG_LVT_LINT1], eax
244 mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
245htg_nolint1:
246 mov eax, [ebx + APIC_REG_LVT_PC]
247 mov ecx, eax
248 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
249 cmp ecx, APIC_REG_LVT_MODE_NMI
250 jne htg_nopc
251 or edi, 0x04
252 or eax, APIC_REG_LVT_MASKED
253 mov [ebx + APIC_REG_LVT_PC], eax
254 mov eax, [ebx + APIC_REG_LVT_PC] ; write completion
255htg_nopc:
256 mov eax, [ebx + APIC_REG_VERSION]
257 shr eax, 16
258 cmp al, 5
259 jb htg_notherm
260 mov eax, [ebx + APIC_REG_LVT_THMR]
261 mov ecx, eax
262 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
263 cmp ecx, APIC_REG_LVT_MODE_NMI
264 jne htg_notherm
265 or edi, 0x08
266 or eax, APIC_REG_LVT_MASKED
267 mov [ebx + APIC_REG_LVT_THMR], eax
268 mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion
269htg_notherm:
270 mov [edx + CPUM.fApicDisVectors], edi
271htg_noapic:
272 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
273%endif
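;
; C-level sketch of the LVT bookkeeping above, for orientation only. The
; helpers isNmiMode()/maskLvt() are hypothetical; the real code reads and
; writes the APIC MMIO registers directly, and vmmR0ToRawMode undoes the
; masking on the way back, one fApicDisVectors bit per vector:
;
;   uint32_t fDis = 0;
;   if (isNmiMode(LVT_LINT0)) { maskLvt(LVT_LINT0); fDis |= RT_BIT_32(0); }
;   if (isNmiMode(LVT_LINT1)) { maskLvt(LVT_LINT1); fDis |= RT_BIT_32(1); }
;   if (isNmiMode(LVT_PC))    { maskLvt(LVT_PC);    fDis |= RT_BIT_32(2); }
;   if (maxLvtEntry >= 5 && isNmiMode(LVT_THMR))
;                             { maskLvt(LVT_THMR);  fDis |= RT_BIT_32(3); }
;   pCpum->fApicDisVectors = fDis;
;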
274
275 ; control registers.
276 mov eax, cr0
277 mov [edx + CPUMCPU.Host.cr0], eax
278 ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
279 mov eax, cr3
280 mov [edx + CPUMCPU.Host.cr3], eax
281 mov eax, cr4
282 mov [edx + CPUMCPU.Host.cr4], eax
283 DEBUG32_S_CHAR('c')
284 DEBUG32_S_CHAR(';')
285
286 ; save the host EFER msr
287 mov ebx, edx
288 mov ecx, MSR_K6_EFER
289 rdmsr
290 mov [ebx + CPUMCPU.Host.efer], eax
291 mov [ebx + CPUMCPU.Host.efer + 4], edx
292 mov edx, ebx
293 DEBUG32_S_CHAR('e')
294 DEBUG32_S_CHAR(';')
295
296%ifdef VBOX_WITH_CRASHDUMP_MAGIC
297 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
298%endif
299
300 ; Load the new GDT so we can do a far jump after going into 64-bit mode.
301 lgdt [edx + CPUMCPU.Hyper.gdtr]
302
303 DEBUG32_S_CHAR('g')
304 DEBUG32_S_CHAR('!')
305%ifdef VBOX_WITH_CRASHDUMP_MAGIC
306 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
307%endif
308
309 ;;
310 ;; Load Intermediate memory context.
311 ;;
312 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
313 mov eax, 0ffffffffh
314 mov cr3, eax
315 DEBUG32_CHAR('?')
316
317 ;;
318 ;; Jump to identity mapped location
319 ;;
320 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
321 jmp near NAME(IDEnterTarget)
322
323
324 ; We're now on identity mapped pages!
325ALIGNCODE(16)
326GLOBALNAME IDEnterTarget
327 DEBUG32_CHAR('1')
328
329 ; 1. Disable paging.
330 mov ebx, cr0
331 and ebx, ~X86_CR0_PG
332 mov cr0, ebx
333 DEBUG32_CHAR('2')
334
335%ifdef VBOX_WITH_CRASHDUMP_MAGIC
336 mov eax, cr2
337 mov dword [eax], 3
338%endif
339
340 ; 2. Enable PAE.
341 mov ecx, cr4
342 or ecx, X86_CR4_PAE
343 mov cr4, ecx
344
345 ; 3. Load long mode intermediate CR3.
346 FIXUP FIX_INTER_AMD64_CR3, 1
347 mov ecx, 0ffffffffh
348 mov cr3, ecx
349 DEBUG32_CHAR('3')
350
351%ifdef VBOX_WITH_CRASHDUMP_MAGIC
352 mov eax, cr2
353 mov dword [eax], 4
354%endif
355
356 ; 4. Enable long mode.
357 mov esi, edx
358 mov ecx, MSR_K6_EFER
359 rdmsr
360 FIXUP FIX_EFER_OR_MASK, 1
361 or eax, 0ffffffffh
362 and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
363 wrmsr
364 mov edx, esi
365 DEBUG32_CHAR('4')
366
367%ifdef VBOX_WITH_CRASHDUMP_MAGIC
368 mov eax, cr2
369 mov dword [eax], 5
370%endif
371
372 ; 5. Enable paging.
373 or ebx, X86_CR0_PG
374 ; Disable ring 0 write protection too
375 and ebx, ~X86_CR0_WRITE_PROTECT
376 mov cr0, ebx
377 DEBUG32_CHAR('5')
378
379 ; Jump from compatibility mode to 64-bit mode.
380 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
381 jmp 0ffffh:0fffffffeh
382
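;
; For orientation, the classic long-mode entry recipe performed in steps 1-5
; above, as C-like pseudocode (illustrative only; the control register and MSR
; accesses are really the instructions above, and the EFER bits to set come
; from the FIX_EFER_OR_MASK fixup; PhysInterAmd64Cr3/uEferOrMask are
; hypothetical names):
;
;   cr0 &= ~X86_CR0_PG;          /* 1. paging off (we're on identity mapped pages) */
;   cr4 |=  X86_CR4_PAE;         /* 2. PAE on                                      */
;   cr3  =  PhysInterAmd64Cr3;   /* 3. long-mode intermediate page tables          */
;   efer = (efer | uEferOrMask) & ~MSR_K6_EFER_FFXSR;   /* 4. LME etc. on          */
;   cr0 |=  X86_CR0_PG;          /* 5. paging on -> compatibility mode             */
;   /* ...then the far jump above loads a 64-bit CS. */
;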
383 ;
384 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
385BITS 64
386ALIGNCODE(16)
387NAME(IDEnter64Mode):
388 DEBUG64_CHAR('6')
389 jmp [NAME(pICEnterTarget) wrt rip]
390
391; 64-bit jump target
392NAME(pICEnterTarget):
393FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
394dq 0ffffffffffffffffh
395
396; 64-bit pCpum address.
397NAME(pCpumIC):
398FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
399dq 0ffffffffffffffffh
400
401%ifdef VBOX_WITH_CRASHDUMP_MAGIC
402NAME(pMarker):
403db 'Switch_marker'
404%endif
405
406 ;
407 ; When we arrive here we're in 64-bit mode in the intermediate context
408 ;
409ALIGNCODE(16)
410GLOBALNAME ICEnterTarget
411 ; Load CPUM pointer into rdx
412 mov rdx, [NAME(pCpumIC) wrt rip]
413 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
414
415 mov rax, cs
416 mov ds, rax
417 mov es, rax
418
419 ; Invalidate fs & gs
420 mov rax, 0
421 mov fs, rax
422 mov gs, rax
423
424%ifdef VBOX_WITH_CRASHDUMP_MAGIC
425 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
426%endif
427
428 ; Setup stack.
429 DEBUG64_CHAR('7')
430 mov rsp, 0
431 mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
432 mov ss, ax
433 mov esp, [rdx + CPUMCPU.Hyper.esp]
434
435%ifdef VBOX_WITH_CRASHDUMP_MAGIC
436 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
437%endif
438
439
440 ; load the hypervisor function address
441 mov r9, [rdx + CPUMCPU.Hyper.eip]
442 DEBUG64_S_CHAR('8')
443
444 ; Check if we need to restore the guest FPU state
445 mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
446 test esi, CPUM_SYNC_FPU_STATE
447 jz near htg_fpu_no
448
449%ifdef VBOX_WITH_CRASHDUMP_MAGIC
450 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
451%endif
452
453 mov rax, cr0
454 mov rcx, rax ; save old CR0
455 and rax, ~(X86_CR0_TS | X86_CR0_EM)
456 mov cr0, rax
457 fxrstor [rdx + CPUMCPU.Guest.fpu]
458 mov cr0, rcx ; and restore old CR0 again
459
460 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
461
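;
; The CR0 dance above in C terms (illustrative; ASMGetCR0/ASMSetCR0 are the
; IPRT accessors, the fxrstor itself has no C equivalent here):
;
;   RTCCUINTREG const uOldCr0 = ASMGetCR0();
;   ASMSetCR0(uOldCr0 & ~(X86_CR0_TS | X86_CR0_EM)); /* allow FPU access           */
;   /* fxrstor [pCpumCpu->Guest.fpu] */
;   ASMSetCR0(uOldCr0);                              /* restore TS/EM as they were */
;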
462htg_fpu_no:
463 ; Check if we need to restore the guest debug state
464 test esi, CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER
465 jz htg_debug_done
466
467%ifdef VBOX_WITH_CRASHDUMP_MAGIC
468 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
469%endif
470 test esi, CPUM_SYNC_DEBUG_REGS_HYPER
471 jnz htg_debug_hyper
472
473 ; Guest values in DRx, letting the guest access them directly.
474 mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
475 mov dr0, rax
476 mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
477 mov dr1, rax
478 mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
479 mov dr2, rax
480 mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
481 mov dr3, rax
482 mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
483 mov dr6, rax ; not required for AMD-V
484
485 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_GUEST
486 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_GUEST
487 jmp htg_debug_done
488
489htg_debug_hyper:
490 ; Combined values in DRx, intercepting all accesses.
491 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 0*8]
492 mov dr0, rax
493 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 1*8]
494 mov dr1, rax
495 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 2*8]
496 mov dr2, rax
497 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 3*8]
498 mov dr3, rax
499 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 6*8]
500 mov dr6, rax ; not required for AMD-V
501
502 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_HYPER
503 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
504
505htg_debug_done:
506
507%ifdef VBOX_WITH_CRASHDUMP_MAGIC
508 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
509%endif
510
511 ;
512 ; "Call" the specified helper function.
513 ;
514
515 ; parameter for all helper functions (pCtx)
516 DEBUG64_CHAR('9')
517 lea rsi, [rdx + CPUMCPU.Guest.fpu]
518 lea rax, [htg_return wrt rip]
519 push rax ; return address
520
521 cmp r9d, HM64ON32OP_VMXRCStartVM64
522 jz NAME(VMXRCStartVM64)
523 cmp r9d, HM64ON32OP_SVMRCVMRun64
524 jz NAME(SVMRCVMRun64)
525 cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
526 jz NAME(HMRCSaveGuestFPU64)
527 cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
528 jz NAME(HMRCSaveGuestDebug64)
529 cmp r9d, HM64ON32OP_HMRCTestSwitcher64
530 jz NAME(HMRCTestSwitcher64)
531 mov eax, VERR_HM_INVALID_HM64ON32OP
532htg_return:
533 DEBUG64_CHAR('r')
534
535 ; Load CPUM pointer into rdx
536 mov rdx, [NAME(pCpumIC) wrt rip]
537 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
538
539%ifdef VBOX_WITH_CRASHDUMP_MAGIC
540 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
541%endif
542
543 ; Save the return code
544 mov dword [rdx + CPUMCPU.u32RetCode], eax
545
546 ; now let's switch back
547 jmp NAME(vmmRCToHostAsm) ; rax = returncode.
548
549ENDPROC vmmR0ToRawModeAsm
550
551
552
553
554;
555;
556; HM code (used to be HMRCA.asm at one point).
557; HM code (used to be HMRCA.asm at one point).
558; HM code (used to be HMRCA.asm at one point).
559;
560;
561
562
563
564; Load the corresponding guest MSR (trashes rdx & rcx)
565%macro LOADGUESTMSR 2
566 mov rcx, %1
567 mov edx, dword [rsi + %2 + 4]
568 mov eax, dword [rsi + %2]
569 wrmsr
570%endmacro
571
572; Save a guest MSR (trashes rdx & rcx)
573; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
574%macro SAVEGUESTMSR 2
575 mov rcx, %1
576 rdmsr
577 mov dword [rsi + %2], eax
578 mov dword [rsi + %2 + 4], edx
579%endmacro
580
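;
; In C terms the two macros above are roughly (illustrative; ASMWrMsr/ASMRdMsr
; are the IPRT wrappers around wrmsr/rdmsr):
;
;   /* LOADGUESTMSR msr, off */  ASMWrMsr(msr, *(uint64_t *)((uintptr_t)pCtx + off));
;   /* SAVEGUESTMSR msr, off */  *(uint64_t *)((uintptr_t)pCtx + off) = ASMRdMsr(msr);
;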
581;; @def MYPUSHSEGS
582; Macro saving all segment registers on the stack.
583; @param 1 full width register name
584%macro MYPUSHSEGS 1
585 mov %1, es
586 push %1
587 mov %1, ds
588 push %1
589%endmacro
590
591;; @def MYPOPSEGS
592; Macro restoring all segment registers on the stack
593; @param 1 full width register name
594%macro MYPOPSEGS 1
595 pop %1
596 mov ds, %1
597 pop %1
598 mov es, %1
599%endmacro
600
601
602;/**
603 ; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
604; *
605; * @returns VBox status code
606; * @param HCPhysCpuPage VMXON physical address [rsp+8]
607; * @param HCPhysVmcs VMCS physical address [rsp+16]
608; * @param pCache VMCS cache [rsp+24]
609; * @param pCtx Guest context (rsi)
610; */
611BEGINPROC VMXRCStartVM64
612 push rbp
613 mov rbp, rsp
614
615 ; Make sure VT-x instructions are allowed.
616 mov rax, cr4
617 or rax, X86_CR4_VMXE
618 mov cr4, rax
619
620 ; Enter VMX Root Mode.
621 vmxon [rbp + 8 + 8]
622 jnc .vmxon_success
623 mov rax, VERR_VMX_INVALID_VMXON_PTR
624 jmp .vmstart64_vmxon_failed
625
626.vmxon_success:
627 jnz .vmxon_success2
628 mov rax, VERR_VMX_VMXON_FAILED
629 jmp .vmstart64_vmxon_failed
630
631.vmxon_success2:
632 ; Activate the VMCS pointer
633 vmptrld [rbp + 16 + 8]
634 jnc .vmptrld_success
635 mov rax, VERR_VMX_INVALID_VMCS_PTR
636 jmp .vmstart64_vmxoff_end
637
638.vmptrld_success:
639 jnz .vmptrld_success2
640 mov rax, VERR_VMX_VMPTRLD_FAILED
641 jmp .vmstart64_vmxoff_end
642
643.vmptrld_success2:
644
645 ; Save the VMCS pointer on the stack
646 push qword [rbp + 16 + 8];
647
648 ; Save segment registers.
649 MYPUSHSEGS rax
650
651%ifdef VMX_USE_CACHED_VMCS_ACCESSES
652 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
653 mov rbx, [rbp + 24 + 8] ; pCache
654
655 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
656 mov qword [rbx + VMCSCACHE.uPos], 2
657 %endif
658
659 %ifdef DEBUG
660 mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
661 mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
662 mov rax, [rbp + 16 + 8] ; HCPhysVmcs
663 mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
664 mov [rbx + VMCSCACHE.TestIn.pCache], rbx
665 mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
666 %endif
667
668 mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
669 cmp ecx, 0
670 je .no_cached_writes
671 mov rdx, rcx
672 mov rcx, 0
673 jmp .cached_write
674
675ALIGN(16)
676.cached_write:
677 mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
678 vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
679 inc rcx
680 cmp rcx, rdx
681 jl .cached_write
682
683 mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
684.no_cached_writes:
685
686 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
687 mov qword [rbx + VMCSCACHE.uPos], 3
688 %endif
689 ; Save the pCache pointer.
690 push rbx
691%endif
692
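;
; The cached-write flush above, as a C-level sketch (illustrative;
; VMXWriteVmcs64 stands in for the raw vmwrite of a 64-bit field value):
;
;   for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
;       VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
;   pCache->Write.cValidEntries = 0;
;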
693 ; Save the host state that's relevant in the temporary 64-bit mode.
694 mov rdx, cr0
695 mov eax, VMX_VMCS_HOST_CR0
696 vmwrite rax, rdx
697
698 mov rdx, cr3
699 mov eax, VMX_VMCS_HOST_CR3
700 vmwrite rax, rdx
701
702 mov rdx, cr4
703 mov eax, VMX_VMCS_HOST_CR4
704 vmwrite rax, rdx
705
706 mov rdx, cs
707 mov eax, VMX_VMCS_HOST_FIELD_CS
708 vmwrite rax, rdx
709
710 mov rdx, ss
711 mov eax, VMX_VMCS_HOST_FIELD_SS
712 vmwrite rax, rdx
713
714 sub rsp, 8*2
715 sgdt [rsp]
716 mov eax, VMX_VMCS_HOST_GDTR_BASE
717 vmwrite rax, [rsp+2]
718 add rsp, 8*2
719
720%ifdef VBOX_WITH_CRASHDUMP_MAGIC
721 mov qword [rbx + VMCSCACHE.uPos], 4
722%endif
723
724 ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).
725
726 ; First we have to save some final CPU context registers.
727 lea rdx, [.vmlaunch64_done wrt rip]
728 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
729 vmwrite rax, rdx
730 ; Note: assumes success!
731
732 ; Manual save and restore:
733 ; - General purpose registers except RIP, RSP
734 ;
735 ; Trashed:
736 ; - CR2 (we don't care)
737 ; - LDTR (reset to 0)
738 ; - DRx (presumably not changed at all)
739 ; - DR7 (reset to 0x400)
740 ; - EFLAGS (reset to RT_BIT(1); not relevant)
741
742%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
743 ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs.
744 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
745 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
746 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
747 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
748%endif
749
750%ifdef VBOX_WITH_CRASHDUMP_MAGIC
751 mov qword [rbx + VMCSCACHE.uPos], 5
752%endif
753
754 ; Save the pCtx pointer
755 push rsi
756
757 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
758 mov rbx, qword [rsi + CPUMCTX.cr2]
759 mov rdx, cr2
760 cmp rdx, rbx
761 je .skipcr2write64
762 mov cr2, rbx
763
764.skipcr2write64:
765 mov eax, VMX_VMCS_HOST_RSP
766 vmwrite rax, rsp
767 ; Note: assumes success!
768 ; Don't mess with ESP anymore!!!
769
770 ; Save Guest's general purpose registers.
771 mov rax, qword [rsi + CPUMCTX.eax]
772 mov rbx, qword [rsi + CPUMCTX.ebx]
773 mov rcx, qword [rsi + CPUMCTX.ecx]
774 mov rdx, qword [rsi + CPUMCTX.edx]
775 mov rbp, qword [rsi + CPUMCTX.ebp]
776 mov r8, qword [rsi + CPUMCTX.r8]
777 mov r9, qword [rsi + CPUMCTX.r9]
778 mov r10, qword [rsi + CPUMCTX.r10]
779 mov r11, qword [rsi + CPUMCTX.r11]
780 mov r12, qword [rsi + CPUMCTX.r12]
781 mov r13, qword [rsi + CPUMCTX.r13]
782 mov r14, qword [rsi + CPUMCTX.r14]
783 mov r15, qword [rsi + CPUMCTX.r15]
784
785 ; Save rdi & rsi.
786 mov rdi, qword [rsi + CPUMCTX.edi]
787 mov rsi, qword [rsi + CPUMCTX.esi]
788
789 vmlaunch
790 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
791
792ALIGNCODE(16)
793.vmlaunch64_done:
794 jc near .vmstart64_invalid_vmcs_ptr
795 jz near .vmstart64_start_failed
796
797 push rdi
798 mov rdi, [rsp + 8] ; pCtx
799
800 mov qword [rdi + CPUMCTX.eax], rax
801 mov qword [rdi + CPUMCTX.ebx], rbx
802 mov qword [rdi + CPUMCTX.ecx], rcx
803 mov qword [rdi + CPUMCTX.edx], rdx
804 mov qword [rdi + CPUMCTX.esi], rsi
805 mov qword [rdi + CPUMCTX.ebp], rbp
806 mov qword [rdi + CPUMCTX.r8], r8
807 mov qword [rdi + CPUMCTX.r9], r9
808 mov qword [rdi + CPUMCTX.r10], r10
809 mov qword [rdi + CPUMCTX.r11], r11
810 mov qword [rdi + CPUMCTX.r12], r12
811 mov qword [rdi + CPUMCTX.r13], r13
812 mov qword [rdi + CPUMCTX.r14], r14
813 mov qword [rdi + CPUMCTX.r15], r15
814 mov rax, cr2
815 mov qword [rdi + CPUMCTX.cr2], rax
816
817 pop rax ; The guest edi we pushed above
818 mov qword [rdi + CPUMCTX.edi], rax
819
820 pop rsi ; pCtx (needed in rsi by the macros below)
821
822%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
823 SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
824 SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
825 SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
826 SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
827%endif
828
829%ifdef VMX_USE_CACHED_VMCS_ACCESSES
830 pop rdi ; Saved pCache
831
832 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
833 mov dword [rdi + VMCSCACHE.uPos], 7
834 %endif
835 %ifdef DEBUG
836 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
837 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
838 mov rax, cr8
839 mov [rdi + VMCSCACHE.TestOut.cr8], rax
840 %endif
841
842 mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
843 cmp ecx, 0 ; Can't happen
844 je .no_cached_reads
845 jmp .cached_read
846
847ALIGN(16)
848.cached_read:
849 dec rcx
850 mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
851 vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
852 cmp rcx, 0
853 jnz .cached_read
854.no_cached_reads:
855 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
856 mov dword [rdi + VMCSCACHE.uPos], 8
857 %endif
858%endif
859
860 ; Restore segment registers.
861 MYPOPSEGS rax
862
863 mov eax, VINF_SUCCESS
864
865%ifdef VBOX_WITH_CRASHDUMP_MAGIC
866 mov dword [rdi + VMCSCACHE.uPos], 9
867%endif
868.vmstart64_end:
869
870%ifdef VMX_USE_CACHED_VMCS_ACCESSES
871 %ifdef DEBUG
872 mov rdx, [rsp] ; HCPhysVmcs
873 mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
874 %endif
875%endif
876
877 ; Write back the data and disable the VMCS.
878 vmclear qword [rsp] ; Pushed pVMCS
879 add rsp, 8
880
881.vmstart64_vmxoff_end:
882 ; Disable VMX root mode.
883 vmxoff
884.vmstart64_vmxon_failed:
885%ifdef VMX_USE_CACHED_VMCS_ACCESSES
886 %ifdef DEBUG
887 cmp eax, VINF_SUCCESS
888 jne .skip_flags_save
889
890 pushf
891 pop rdx
892 mov [rdi + VMCSCACHE.TestOut.eflags], rdx
893 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
894 mov dword [rdi + VMCSCACHE.uPos], 12
895 %endif
896.skip_flags_save:
897 %endif
898%endif
899 pop rbp
900 ret
901
902
903.vmstart64_invalid_vmcs_ptr:
904 pop rsi ; pCtx (needed in rsi by the macros below)
905
906%ifdef VMX_USE_CACHED_VMCS_ACCESSES
907 pop rdi ; pCache
908 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
909 mov dword [rdi + VMCSCACHE.uPos], 10
910 %endif
911
912 %ifdef DEBUG
913 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
914 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
915 %endif
916%endif
917
918 ; Restore segment registers.
919 MYPOPSEGS rax
920
921 ; Restore all general purpose host registers.
922 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
923 jmp .vmstart64_end
924
925.vmstart64_start_failed:
926 pop rsi ; pCtx (needed in rsi by the macros below)
927
928%ifdef VMX_USE_CACHED_VMCS_ACCESSES
929 pop rdi ; pCache
930
931 %ifdef DEBUG
932 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
933 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
934 %endif
935 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
936 mov dword [rdi + VMCSCACHE.uPos], 11
937 %endif
938%endif
939
940 ; Restore segment registers.
941 MYPOPSEGS rax
942
943 ; Restore all general purpose host registers.
944 mov eax, VERR_VMX_UNABLE_TO_START_VM
945 jmp .vmstart64_end
946ENDPROC VMXRCStartVM64
947
948
949;/**
950 ; * Prepares for and executes VMRUN (64-bit guests)
951; *
952; * @returns VBox status code
953; * @param HCPhysVMCB Physical address of host VMCB (rsp+8)
954; * @param HCPhysVMCB Physical address of guest VMCB (rsp+16)
955; * @param pCtx Guest context (rsi)
956; */
957BEGINPROC SVMRCVMRun64
958 push rbp
959 mov rbp, rsp
960 pushf
961
962 ; Manual save and restore:
963 ; - General purpose registers except RIP, RSP, RAX
964 ;
965 ; Trashed:
966 ; - CR2 (we don't care)
967 ; - LDTR (reset to 0)
968 ; - DRx (presumably not changed at all)
969 ; - DR7 (reset to 0x400)
970
971 ; Save the Guest CPU context pointer.
972 push rsi ; Push for saving the state at the end
973
974 ; Save host fs, gs, sysenter msr etc
975 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64 bits physical address)
976 push rax ; Save for the vmload after vmrun
977 vmsave
978
979 ; Setup eax for VMLOAD
980 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)
981
982 ; Restore Guest's general purpose registers.
983 ; rax is loaded from the VMCB by VMRUN.
984 mov rbx, qword [rsi + CPUMCTX.ebx]
985 mov rcx, qword [rsi + CPUMCTX.ecx]
986 mov rdx, qword [rsi + CPUMCTX.edx]
987 mov rdi, qword [rsi + CPUMCTX.edi]
988 mov rbp, qword [rsi + CPUMCTX.ebp]
989 mov r8, qword [rsi + CPUMCTX.r8]
990 mov r9, qword [rsi + CPUMCTX.r9]
991 mov r10, qword [rsi + CPUMCTX.r10]
992 mov r11, qword [rsi + CPUMCTX.r11]
993 mov r12, qword [rsi + CPUMCTX.r12]
994 mov r13, qword [rsi + CPUMCTX.r13]
995 mov r14, qword [rsi + CPUMCTX.r14]
996 mov r15, qword [rsi + CPUMCTX.r15]
997 mov rsi, qword [rsi + CPUMCTX.esi]
998
999 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1000 clgi
1001 sti
1002
1003 ; Load guest fs, gs, sysenter msr etc
1004 vmload
1005 ; Run the VM
1006 vmrun
1007
1008 ; rax is in the VMCB already; we can use it here.
1009
1010 ; Save guest fs, gs, sysenter msr etc.
1011 vmsave
1012
1013 ; Load host fs, gs, sysenter msr etc.
1014 pop rax ; Pushed above
1015 vmload
1016
1017 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1018 cli
1019 stgi
1020
1021 pop rax ; pCtx
1022
1023 mov qword [rax + CPUMCTX.ebx], rbx
1024 mov qword [rax + CPUMCTX.ecx], rcx
1025 mov qword [rax + CPUMCTX.edx], rdx
1026 mov qword [rax + CPUMCTX.esi], rsi
1027 mov qword [rax + CPUMCTX.edi], rdi
1028 mov qword [rax + CPUMCTX.ebp], rbp
1029 mov qword [rax + CPUMCTX.r8], r8
1030 mov qword [rax + CPUMCTX.r9], r9
1031 mov qword [rax + CPUMCTX.r10], r10
1032 mov qword [rax + CPUMCTX.r11], r11
1033 mov qword [rax + CPUMCTX.r12], r12
1034 mov qword [rax + CPUMCTX.r13], r13
1035 mov qword [rax + CPUMCTX.r14], r14
1036 mov qword [rax + CPUMCTX.r15], r15
1037
1038 mov eax, VINF_SUCCESS
1039
1040 popf
1041 pop rbp
1042 ret
1043ENDPROC SVMRCVMRun64
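;
; The VMRUN protocol above, summarised as C-like pseudocode. The lower-case
; "functions" simply stand for the corresponding instructions, and the
; HCPhysVmcbHost/HCPhysVmcbGuest names are descriptive stand-ins for the two
; stack parameters:
;
;   vmsave(HCPhysVmcbHost);     /* stash host fs/gs/sysenter/... state  */
;   clgi(); sti();
;   vmload(HCPhysVmcbGuest);    /* load guest fs/gs/sysenter/... state  */
;   vmrun(HCPhysVmcbGuest);     /* execute the guest until #VMEXIT      */
;   vmsave(HCPhysVmcbGuest);    /* save updated guest state             */
;   vmload(HCPhysVmcbHost);     /* restore host state                   */
;   cli(); stgi();
;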
1044
1045;/**
1046; * Saves the guest FPU context
1047; *
1048; * @returns VBox status code
1049; * @param pCtx Guest context [rsi]
1050; */
1051BEGINPROC HMRCSaveGuestFPU64
1052 mov rax, cr0
1053 mov rcx, rax ; save old CR0
1054 and rax, ~(X86_CR0_TS | X86_CR0_EM)
1055 mov cr0, rax
1056
1057 fxsave [rsi + CPUMCTX.fpu]
1058
1059 mov cr0, rcx ; and restore old CR0 again
1060
1061 mov eax, VINF_SUCCESS
1062 ret
1063ENDPROC HMRCSaveGuestFPU64
1064
1065;/**
1066; * Saves the guest debug context (DR0-3, DR6)
1067; *
1068; * @returns VBox status code
1069; * @param pCtx Guest context [rsi]
1070; */
1071BEGINPROC HMRCSaveGuestDebug64
1072 mov rax, dr0
1073 mov qword [rsi + CPUMCTX.dr + 0*8], rax
1074 mov rax, dr1
1075 mov qword [rsi + CPUMCTX.dr + 1*8], rax
1076 mov rax, dr2
1077 mov qword [rsi + CPUMCTX.dr + 2*8], rax
1078 mov rax, dr3
1079 mov qword [rsi + CPUMCTX.dr + 3*8], rax
1080 mov rax, dr6
1081 mov qword [rsi + CPUMCTX.dr + 6*8], rax
1082 mov eax, VINF_SUCCESS
1083 ret
1084ENDPROC HMRCSaveGuestDebug64
1085
1086;/**
1087; * Dummy callback handler
1088; *
1089; * @returns VBox status code
1090; * @param param1 Parameter 1 [rsp+8]
1091; * @param param2 Parameter 2 [rsp+12]
1092; * @param param3 Parameter 3 [rsp+16]
1093; * @param param4 Parameter 4 [rsp+20]
1094; * @param param5 Parameter 5 [rsp+24]
1095; * @param pCtx Guest context [rsi]
1096; */
1097BEGINPROC HMRCTestSwitcher64
1098 mov eax, [rsp+8]
1099 ret
1100ENDPROC HMRCTestSwitcher64
1101
1102
1103
1104
1105;
1106;
1107; Back to switcher code.
1108; Back to switcher code.
1109; Back to switcher code.
1110;
1111;
1112
1113
1114
1115;;
1116; Trampoline for doing a call when starting the hypervisor execution.
1117;
1118; Push any arguments to the routine.
1119; Push the argument frame size (cArg * 4).
1120; Push the call target (_cdecl convention).
1121; Push the address of this routine.
1122;
1123;
1124BITS 64
1125ALIGNCODE(16)
1126BEGINPROC vmmRCCallTrampoline
1127%ifdef DEBUG_STUFF
1128 COM64_S_CHAR 'c'
1129 COM64_S_CHAR 't'
1130 COM64_S_CHAR '!'
1131%endif
1132 int3
1133ENDPROC vmmRCCallTrampoline
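;
; Caller-side stack setup implied by the protocol described above, as C-like
; pseudocode (purely illustrative; in this 32-on-64 switcher the trampoline is
; only an int3 stub):
;
;   push(argN); ... push(arg1);        /* arguments, last to first */
;   push(cArgs * 4);                   /* argument frame size      */
;   push(pfnTarget);                   /* _cdecl call target       */
;   push(&vmmRCCallTrampoline);        /* where execution starts   */
;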
1134
1135
1136;;
1137; The C interface.
1138;
1139BITS 64
1140ALIGNCODE(16)
1141BEGINPROC vmmRCToHost
1142%ifdef DEBUG_STUFF
1143 push rsi
1144 COM_NEWLINE
1145 COM_CHAR 'b'
1146 COM_CHAR 'a'
1147 COM_CHAR 'c'
1148 COM_CHAR 'k'
1149 COM_CHAR '!'
1150 COM_NEWLINE
1151 pop rsi
1152%endif
1153 int3
1154ENDPROC vmmRCToHost
1155
1156;;
1157; vmmRCToHostAsm
1158;
1159; This is an alternative entry point which we'll be using
1160; when we have saved the guest state already or we haven't
1161; been messing with the guest at all.
1162;
1163; @param eax Return code.
1164; @uses eax, edx, ecx (or it may use them in the future)
1165;
1166BITS 64
1167ALIGNCODE(16)
1168BEGINPROC vmmRCToHostAsm
1169NAME(vmmRCToHostAsmNoReturn):
1170 ;; We're still in the intermediate memory context!
1171
1172 ;;
1173 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
1174 ;;
1175 jmp far [NAME(fpIDEnterTarget) wrt rip]
1176
1177; 16:32 Pointer to IDEnterTarget.
1178NAME(fpIDEnterTarget):
1179 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
1180dd 0
1181 FIXUP FIX_HYPER_CS, 0
1182dd 0
1183
1184 ; We're now on identity mapped pages!
1185ALIGNCODE(16)
1186GLOBALNAME IDExitTarget
1187BITS 32
1188 DEBUG32_CHAR('1')
1189
1190 ; 1. Deactivate long mode by turning off paging.
1191 mov ebx, cr0
1192 and ebx, ~X86_CR0_PG
1193 mov cr0, ebx
1194 DEBUG32_CHAR('2')
1195
1196 ; 2. Load intermediate page table.
1197 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
1198 mov edx, 0ffffffffh
1199 mov cr3, edx
1200 DEBUG32_CHAR('3')
1201
1202 ; 3. Disable long mode.
1203 mov ecx, MSR_K6_EFER
1204 rdmsr
1205 DEBUG32_CHAR('5')
1206 and eax, ~(MSR_K6_EFER_LME)
1207 wrmsr
1208 DEBUG32_CHAR('6')
1209
1210%ifndef NEED_PAE_ON_HOST
1211 ; 3b. Disable PAE.
1212 mov eax, cr4
1213 and eax, ~X86_CR4_PAE
1214 mov cr4, eax
1215 DEBUG32_CHAR('7')
1216%endif
1217
1218 ; 4. Enable paging.
1219 or ebx, X86_CR0_PG
1220 mov cr0, ebx
1221 jmp short just_a_jump
1222just_a_jump:
1223 DEBUG32_CHAR('8')
1224
1225 ;;
1226 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
1227 ;;
1228 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
1229 jmp near NAME(ICExitTarget)
1230
1231 ;;
1232 ;; When we arrive at this label we're at the
1233 ;; intermediate mapping of the switching code.
1234 ;;
1235BITS 32
1236ALIGNCODE(16)
1237GLOBALNAME ICExitTarget
1238 DEBUG32_CHAR('8')
1239
1240 ; load the hypervisor data selector into ds & es
1241 FIXUP FIX_HYPER_DS, 1
1242 mov eax, 0ffffh
1243 mov ds, eax
1244 mov es, eax
1245
1246 FIXUP FIX_GC_CPUM_OFF, 1, 0
1247 mov edx, 0ffffffffh
1248 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1249 mov esi, [edx + CPUMCPU.Host.cr3]
1250 mov cr3, esi
1251
1252 ;; now we're in host memory context, let's restore regs
1253 FIXUP FIX_HC_CPUM_OFF, 1, 0
1254 mov edx, 0ffffffffh
1255 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1256
1257 ; restore the host EFER
1258 mov ebx, edx
1259 mov ecx, MSR_K6_EFER
1260 mov eax, [ebx + CPUMCPU.Host.efer]
1261 mov edx, [ebx + CPUMCPU.Host.efer + 4]
1262 wrmsr
1263 mov edx, ebx
1264
1265 ; activate host gdt and idt
1266 lgdt [edx + CPUMCPU.Host.gdtr]
1267 DEBUG32_CHAR('0')
1268 lidt [edx + CPUMCPU.Host.idtr]
1269 DEBUG32_CHAR('1')
1270
1271 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1272 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1273 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
1274 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1275 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1276 and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
1277 ltr word [edx + CPUMCPU.Host.tr]
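;
; What the four instructions above do, in C-like pseudocode (pDesc/uGdtBase
; are hypothetical names; the code clears bit 9 of the descriptor's second
; dword, i.e. the TSS 'busy' type bit, before reloading TR):
;
;   uint8_t *pDesc = (uint8_t *)uGdtBase + (SelTR & ~7);   /* strip TI/RPL  */
;   *(uint32_t *)(pDesc + 4) &= ~UINT32_C(0x200);          /* mark not busy */
;   ltr(SelTR);
;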
1278
1279 ; activate ldt
1280 DEBUG32_CHAR('2')
1281 lldt [edx + CPUMCPU.Host.ldtr]
1282
1283 ; Restore segment registers
1284 mov eax, [edx + CPUMCPU.Host.ds]
1285 mov ds, eax
1286 mov eax, [edx + CPUMCPU.Host.es]
1287 mov es, eax
1288 mov eax, [edx + CPUMCPU.Host.fs]
1289 mov fs, eax
1290 mov eax, [edx + CPUMCPU.Host.gs]
1291 mov gs, eax
1292 ; restore stack
1293 lss esp, [edx + CPUMCPU.Host.esp]
1294
1295 ; Control registers.
1296 mov ecx, [edx + CPUMCPU.Host.cr4]
1297 mov cr4, ecx
1298 mov ecx, [edx + CPUMCPU.Host.cr0]
1299 mov cr0, ecx
1300 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
1301 ;mov cr2, ecx
1302
1303 ; restore general registers.
1304 mov edi, [edx + CPUMCPU.Host.edi]
1305 mov esi, [edx + CPUMCPU.Host.esi]
1306 mov ebx, [edx + CPUMCPU.Host.ebx]
1307 mov ebp, [edx + CPUMCPU.Host.ebp]
1308
1309 ; store the return code in eax
1310 mov eax, [edx + CPUMCPU.u32RetCode]
1311 retf
1312ENDPROC vmmRCToHostAsm
1313
1314
1315GLOBALNAME End
1316;
1317; The description string (in the text section).
1318;
1319NAME(Description):
1320 db SWITCHER_DESCRIPTION
1321 db 0
1322
1323extern NAME(Relocate)
1324
1325;
1326; End the fixup records.
1327;
1328BEGINDATA
1329 db FIX_THE_END ; final entry.
1330GLOBALNAME FixupsEnd
1331
1332;;
1333; The switcher definition structure.
1334ALIGNDATA(16)
1335GLOBALNAME Def
1336 istruc VMMSWITCHERDEF
1337 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1338 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1339 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1340 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1341 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1342 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1343 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1344 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1345 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1346 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1347 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1348 ; disasm help
1349 at VMMSWITCHERDEF.offHCCode0, dd 0
1350 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1351 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
1352 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
1353 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1354 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
1355 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1356 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
1357 at VMMSWITCHERDEF.offGCCode, dd 0
1358 at VMMSWITCHERDEF.cbGCCode, dd 0
1359
1360 iend
1361