1; $Id: LegacyandAMD64.mac 58123 2015-10-08 18:09:45Z vboxsync $
2;; @file
3; VMM - World Switchers, 32-bit to AMD64 intermediate context.
4;
5; This is used for running 64-bit guests on 32-bit hosts, not
6; normal raw-mode. All the code involved is contained in this
7; file.
8;
9
10;
11; Copyright (C) 2006-2015 Oracle Corporation
12;
13; This file is part of VirtualBox Open Source Edition (OSE), as
14; available from http://www.virtualbox.org. This file is free software;
15; you can redistribute it and/or modify it under the terms of the GNU
16; General Public License (GPL) as published by the Free Software
17; Foundation, in version 2 as it comes in the "COPYING" file of the
18; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20;
21
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26;; @note These values are from the HM64ON32OP enum in hm.h.
27%define HM64ON32OP_VMXRCStartVM64 1
28%define HM64ON32OP_SVMRCVMRun64 2
29%define HM64ON32OP_HMRCSaveGuestFPU64 3
30%define HM64ON32OP_HMRCSaveGuestDebug64 4
31%define HM64ON32OP_HMRCTestSwitcher64 5
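; These operation codes are passed to the 64-bit code in CPUMCPU.Hyper.eip
; and dispatched in vmmR0ToRawModeAsm (see the "Call the specified helper
; function" block below); keep them in sync with the HM64ON32OP enum in hm.h.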
32
33;;
34; This macro is used for storing a debug code in a CMOS location.
35;
36; If we triple fault or something, the debug code can be retrieved and we
37; might have a clue as to where the problem occurred. The code is currently
38; using CMOS register 3 in the 2nd bank as this _seems_ to be unused on my
39; ASRock X79 Extreme4 mainboard.
40;
41; @param %1 The debug code (byte)
42; @note Trashes AL.
43;
44%macro DEBUG_CMOS_TRASH_AL 1
45%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
46 mov al, 3
47 out 72h, al
48 mov al, %1
49 out 73h, al
50 in al, 73h
51%endif
52%endmacro
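; Usage sketch (for illustration): "DEBUG_CMOS_TRASH_AL 42h" stores the byte
; 42h in bank 2 register 3; it can be read back later through the same
; index/data port pair the macro uses:
;       mov al, 3               ; select register 3 in the 2nd CMOS bank
;       out 72h, al
;       in  al, 73h             ; al = last debug code written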
53
54;;
55; Version of DEBUG_CMOS_TRASH_AL that saves RAX on the stack and therefore
56; doesn't trash any registers.
57;
58%macro DEBUG_CMOS_STACK64 1
59%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
60 push rax
61 DEBUG_CMOS_TRASH_AL %1
62 pop rax
63%endif
64%endmacro
65
66;;
67; Version of DEBUG_CMOS_TRASH_AL that saves EAX on the stack and therefore
68; doesn't trash any registers.
69;
70%macro DEBUG_CMOS_STACK32 1
71%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
72 push eax
73 DEBUG_CMOS_TRASH_AL %1
74 pop eax
75%endif
76%endmacro
77
78
79;; Stubs for making OS/2 compile (though not work).
80%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
81 %macro vmwrite 2,
82 int3
83 %endmacro
84 %define vmlaunch int3
85 %define vmresume int3
86 %define vmsave int3
87 %define vmload int3
88 %define vmrun int3
89 %define clgi int3
90 %define stgi int3
91 %macro invlpga 2,
92 int3
93 %endmacro
94%endif
95
96;; Debug options
97;%define DEBUG_STUFF 1
98;%define STRICT_IF 1
99
100
101;*******************************************************************************
102;* Header Files *
103;*******************************************************************************
104%include "VBox/asmdefs.mac"
105%include "iprt/x86.mac"
106%include "VBox/err.mac"
107%include "VBox/apic.mac"
108
109%include "VBox/vmm/cpum.mac"
110%include "VBox/vmm/stam.mac"
111%include "VBox/vmm/vm.mac"
112%include "VBox/vmm/hm_vmx.mac"
113%include "CPUMInternal.mac"
114%include "HMInternal.mac"
115%include "VMMSwitcher.mac"
116
117
118;
119; Start the fixup records
120; We collect the fixups in the .data section as we go along
121; It is therefore VITAL that no-one is using the .data section
122; for anything else between 'Start' and 'End'.
123;
124BEGINDATA
125GLOBALNAME Fixups
126
127
128
129BEGINCODE
130GLOBALNAME Start
131
132BITS 32
133
134;;
135; The C interface.
136; @param [esp + 04h] Param 1 - VM handle
137; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
138; structure for the calling EMT.
139;
140BEGINPROC vmmR0ToRawMode
141%ifdef DEBUG_STUFF
142 COM32_S_NEWLINE
143 COM32_S_CHAR '^'
144%endif
145
146%ifdef VBOX_WITH_STATISTICS
147 ;
148 ; Switcher stats.
149 ;
150 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
151 mov edx, 0ffffffffh
152 STAM_PROFILE_ADV_START edx
153%endif
154
155 push ebp
156 mov ebp, [esp + 12] ; CPUMCPU offset
157
158 ; turn off interrupts
159 pushf
160 cli
161 ;DEBUG_CMOS_STACK32 10h
162
163 ;
164 ; Call worker.
165 ;
166 FIXUP FIX_HC_CPUM_OFF, 1, 0
167 mov edx, 0ffffffffh
168 push cs ; allow for far return and restore cs correctly.
169 call NAME(vmmR0ToRawModeAsm)
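 ; (The matching far return is the retf at the end of vmmRCToHostAsm, which
 ; pops the near return address pushed by the call together with the cs
 ; pushed above.)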
170
171%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
172 ; Restore blocked Local APIC NMI vectors
173 ; Do this here to ensure the host CS is already restored
174 mov ecx, [edx + CPUMCPU.fApicDisVectors]
175 test ecx, ecx
176 jz gth_apic_done
177 cmp byte [edx + CPUMCPU.fX2Apic], 1
178 je gth_x2apic
179
180 ; Legacy xAPIC mode:
181 mov edx, [edx + CPUMCPU.pvApicBase]
182 shr ecx, 1
183 jnc gth_nolint0
184 and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
185gth_nolint0:
186 shr ecx, 1
187 jnc gth_nolint1
188 and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
189gth_nolint1:
190 shr ecx, 1
191 jnc gth_nopc
192 and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
193gth_nopc:
194 shr ecx, 1
195 jnc gth_notherm
196 and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
197gth_notherm:
198 shr ecx, 1
199 jnc gth_nocmci
200 and dword [edx + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
201gth_nocmci:
202 jmp gth_apic_done
203
204 ; x2APIC mode:
205gth_x2apic:
206 ;DEBUG_CMOS_STACK32 7ch
207 push eax ; save eax
208 push ebx ; save it for fApicDisVectors
209 push edx ; save edx just in case.
210 mov ebx, ecx ; ebx = fApicDisVectors, ecx free for MSR use
211 shr ebx, 1
212 jnc gth_x2_nolint0
213 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
214 rdmsr
215 and eax, ~APIC_REG_LVT_MASKED
216 wrmsr
217gth_x2_nolint0:
218 shr ebx, 1
219 jnc gth_x2_nolint1
220 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
221 rdmsr
222 and eax, ~APIC_REG_LVT_MASKED
223 wrmsr
224gth_x2_nolint1:
225 shr ebx, 1
226 jnc gth_x2_nopc
227 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
228 rdmsr
229 and eax, ~APIC_REG_LVT_MASKED
230 wrmsr
231gth_x2_nopc:
232 shr ebx, 1
233 jnc gth_x2_notherm
234 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
235 rdmsr
236 and eax, ~APIC_REG_LVT_MASKED
237 wrmsr
238gth_x2_notherm:
239 shr ebx, 1
240 jnc gth_x2_nocmci
241 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
242 rdmsr
243 and eax, ~APIC_REG_LVT_MASKED
244 wrmsr
245gth_x2_nocmci:
246 pop edx
247 pop ebx
248 pop eax
249
250gth_apic_done:
251%endif
252
253 ; restore original flags
254 ;DEBUG_CMOS_STACK32 7eh
255 popf
256 pop ebp
257
258%ifdef VBOX_WITH_STATISTICS
259 ;
260 ; Switcher stats.
261 ;
262 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
263 mov edx, 0ffffffffh
264 STAM_PROFILE_ADV_STOP edx
265%endif
266
267 ;DEBUG_CMOS_STACK32 7fh
268 ret
269
270ENDPROC vmmR0ToRawMode
271
272; *****************************************************************************
273; vmmR0ToRawModeAsm
274;
275; Phase one of the switch from host to guest context (host MMU context)
276;
277; INPUT:
278; - edx virtual address of CPUM structure (valid in host context)
279; - ebp offset of the CPUMCPU structure relative to CPUM.
280;
281; USES/DESTROYS:
282; - eax, ecx, edx, esi
283;
284; ASSUMPTION:
285; - current CS and DS selectors are wide open
286;
287; *****************************************************************************
288ALIGNCODE(16)
289BEGINPROC vmmR0ToRawModeAsm
290 ;;
291 ;; Save CPU host context
292 ;; Skip eax, edx and ecx as these are not preserved over calls.
293 ;;
294 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
295%ifdef VBOX_WITH_CRASHDUMP_MAGIC
296 ; phys address of scratch page
297 mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
298 mov cr2, eax
299
300 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
301%endif
302
303 ; general registers.
304 mov [edx + CPUMCPU.Host.ebx], ebx
305 mov [edx + CPUMCPU.Host.edi], edi
306 mov [edx + CPUMCPU.Host.esi], esi
307 mov [edx + CPUMCPU.Host.esp], esp
308 mov [edx + CPUMCPU.Host.ebp], ebp ; offCpumCpu!
309 ; selectors.
310 mov [edx + CPUMCPU.Host.ds], ds
311 mov [edx + CPUMCPU.Host.es], es
312 mov [edx + CPUMCPU.Host.fs], fs
313 mov [edx + CPUMCPU.Host.gs], gs
314 mov [edx + CPUMCPU.Host.ss], ss
315 ; special registers.
316 DEBUG32_S_CHAR('s')
317 DEBUG32_S_CHAR(';')
318 sldt [edx + CPUMCPU.Host.ldtr]
319 sidt [edx + CPUMCPU.Host.idtr]
320 sgdt [edx + CPUMCPU.Host.gdtr]
321 str [edx + CPUMCPU.Host.tr]
322
323%ifdef VBOX_WITH_CRASHDUMP_MAGIC
324 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
325%endif
326
327%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
328 ; Block Local APIC NMI vectors
329 DEBUG32_S_CHAR('f')
330 DEBUG32_S_CHAR(';')
331 cmp byte [edx + CPUMCPU.fX2Apic], 1
332 je htg_x2apic
333
334 ; Legacy xAPIC mode. No write completion required when writing to the
335 ; LVT registers as we have mapped the APIC pages as non-cacheable and
336 ; the MMIO is CPU-local.
337 mov ebx, [edx + CPUMCPU.pvApicBase]
338 or ebx, ebx
339 jz htg_apic_done
340 mov eax, [ebx + APIC_REG_LVT_LINT0]
341 mov ecx, eax
342 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
343 cmp ecx, APIC_REG_LVT_MODE_NMI
344 jne htg_nolint0
345 or edi, 0x01
346 or eax, APIC_REG_LVT_MASKED
347 mov [ebx + APIC_REG_LVT_LINT0], eax
348htg_nolint0:
349 mov eax, [ebx + APIC_REG_LVT_LINT1]
350 mov ecx, eax
351 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
352 cmp ecx, APIC_REG_LVT_MODE_NMI
353 jne htg_nolint1
354 or edi, 0x02
355 or eax, APIC_REG_LVT_MASKED
356 mov [ebx + APIC_REG_LVT_LINT1], eax
357htg_nolint1:
358 mov eax, [ebx + APIC_REG_LVT_PC]
359 mov ecx, eax
360 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
361 cmp ecx, APIC_REG_LVT_MODE_NMI
362 jne htg_nopc
363 or edi, 0x04
364 or eax, APIC_REG_LVT_MASKED
365 mov [ebx + APIC_REG_LVT_PC], eax
366htg_nopc:
367 mov eax, [ebx + APIC_REG_VERSION]
368 shr eax, 16
369 cmp al, 5
370 jb htg_notherm
371 je htg_nocmci
372 mov eax, [ebx + APIC_REG_LVT_CMCI]
373 mov ecx, eax
374 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
375 cmp ecx, APIC_REG_LVT_MODE_NMI
376 jne htg_nocmci
377 or edi, 0x10
378 or eax, APIC_REG_LVT_MASKED
379 mov [ebx + APIC_REG_LVT_CMCI], eax
380htg_nocmci:
381 mov eax, [ebx + APIC_REG_LVT_THMR]
382 mov ecx, eax
383 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
384 cmp ecx, APIC_REG_LVT_MODE_NMI
385 jne htg_notherm
386 or edi, 0x08
387 or eax, APIC_REG_LVT_MASKED
388 mov [ebx + APIC_REG_LVT_THMR], eax
389htg_notherm:
390 mov [edx + CPUMCPU.fApicDisVectors], edi
391 jmp htg_apic_done
392
393 ; x2APIC mode:
394htg_x2apic:
395 mov esi, edx ; Save edx.
396 xor edi, edi ; fApicDisVectors
397
398 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
399 rdmsr
400 mov ebx, eax
401 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
402 cmp ebx, APIC_REG_LVT_MODE_NMI
403 jne htg_x2_nolint0
404 or edi, 0x01
405 or eax, APIC_REG_LVT_MASKED
406 wrmsr
407htg_x2_nolint0:
408 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
409 rdmsr
410 mov ebx, eax
411 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
412 cmp ebx, APIC_REG_LVT_MODE_NMI
413 jne htg_x2_nolint1
414 or edi, 0x02
415 or eax, APIC_REG_LVT_MASKED
416 wrmsr
417htg_x2_nolint1:
418 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
419 rdmsr
420 mov ebx, eax
421 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
422 cmp ebx, APIC_REG_LVT_MODE_NMI
423 jne htg_x2_nopc
424 or edi, 0x04
425 or eax, APIC_REG_LVT_MASKED
426 wrmsr
427htg_x2_nopc:
428 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
429 rdmsr
430 shr eax, 16
431 cmp al, 5
432 jb htg_x2_notherm
433 je htg_x2_nocmci
434 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
435 rdmsr
436 mov ebx, eax
437 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
438 cmp ebx, APIC_REG_LVT_MODE_NMI
439 jne htg_x2_nocmci
440 or edi, 0x10
441 or eax, APIC_REG_LVT_MASKED
442 wrmsr
443htg_x2_nocmci:
444 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
445 rdmsr
446 mov ebx, eax
447 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
448 cmp ebx, APIC_REG_LVT_MODE_NMI
449 jne htg_x2_notherm
450 or edi, 0x08
451 or eax, APIC_REG_LVT_MASKED
452 wrmsr
453htg_x2_notherm:
454 mov edx, esi ; Restore edx.
455 mov [edx + CPUMCPU.fApicDisVectors], edi
456
457htg_apic_done:
458%endif
459
460 ; control registers.
461 mov eax, cr0
462 mov [edx + CPUMCPU.Host.cr0], eax
 463 ;Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
464 mov eax, cr3
465 mov [edx + CPUMCPU.Host.cr3], eax
466 mov esi, cr4 ; esi = cr4, we'll modify it further down.
467 mov [edx + CPUMCPU.Host.cr4], esi
468
469 DEBUG32_S_CHAR('c')
470 DEBUG32_S_CHAR(';')
471
472 ; save the host EFER msr
473 mov ebx, edx
474 mov ecx, MSR_K6_EFER
475 rdmsr
476 mov [ebx + CPUMCPU.Host.efer], eax
477 mov [ebx + CPUMCPU.Host.efer + 4], edx
478 mov edx, ebx
479 DEBUG32_S_CHAR('e')
480 DEBUG32_S_CHAR(';')
481
482%ifdef VBOX_WITH_CRASHDUMP_MAGIC
483 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
484%endif
485
 486 ; Load the new GDT so we can do a far jump after going into 64-bit mode
487 ;DEBUG_CMOS_STACK32 16h
488 lgdt [edx + CPUMCPU.Hyper.gdtr]
489
490 DEBUG32_S_CHAR('g')
491 DEBUG32_S_CHAR('!')
492%ifdef VBOX_WITH_CRASHDUMP_MAGIC
493 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
494%endif
495
496 ;;
497 ;; Clean up CR4. X86_CR4_PGE, X86_CR4_PCE, X86_CR4_PCIDE (not really
498 ;; relevant for 32-bit, but whatever) and X86_CR4_VMXE must be cleared.
499 ;;
500 and esi, X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE \
501 | X86_CR4_MCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT | X86_CR4_SMXE | X86_CR4_OSXSAVE
502 mov cr4, esi
503
504 ;;
505 ;; Load Intermediate memory context.
506 ;;
507 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
508 mov eax, 0ffffffffh
509 mov cr3, eax
510 DEBUG32_CHAR('?')
511%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
512 DEBUG_CMOS_TRASH_AL 17h
513%endif
514
515 ;;
516 ;; Jump to identity mapped location
517 ;;
518 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
519 jmp near NAME(IDEnterTarget)
520
521
522 ; We're now on identity mapped pages!
523ALIGNCODE(16)
524GLOBALNAME IDEnterTarget
525 DEBUG32_CHAR('1')
526 DEBUG_CMOS_TRASH_AL 19h
527
528 ; 1. Disable paging.
529 mov ebx, cr0
530 and ebx, ~X86_CR0_PG
531 mov cr0, ebx
532 DEBUG32_CHAR('2')
533 DEBUG_CMOS_TRASH_AL 1ah
534
535%ifdef VBOX_WITH_CRASHDUMP_MAGIC
536 mov eax, cr2
537 mov dword [eax], 3
538%endif
539
540 ; 2. Enable PAE.
541 mov ecx, cr4
542 or ecx, X86_CR4_PAE
543 mov cr4, ecx
544 DEBUG_CMOS_TRASH_AL 1bh
545
546 ; 3. Load long mode intermediate CR3.
547 FIXUP FIX_INTER_AMD64_CR3, 1
548 mov ecx, 0ffffffffh
549 mov cr3, ecx
550 DEBUG32_CHAR('3')
551 DEBUG_CMOS_TRASH_AL 1ch
552
553%ifdef VBOX_WITH_CRASHDUMP_MAGIC
554 mov eax, cr2
555 mov dword [eax], 4
556%endif
557
558 ; 4. Enable long mode.
559 mov esi, edx
560 mov ecx, MSR_K6_EFER
561 rdmsr
562 FIXUP FIX_EFER_OR_MASK, 1
563 or eax, 0ffffffffh
564 and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
565 wrmsr
566 mov edx, esi
567 DEBUG32_CHAR('4')
568 DEBUG_CMOS_TRASH_AL 1dh
569
570%ifdef VBOX_WITH_CRASHDUMP_MAGIC
571 mov eax, cr2
572 mov dword [eax], 5
573%endif
574
575 ; 5. Enable paging.
576 or ebx, X86_CR0_PG
577 ; Disable ring 0 write protection too
578 and ebx, ~X86_CR0_WRITE_PROTECT
579 mov cr0, ebx
580 DEBUG32_CHAR('5')
581
582 ; Jump from compatibility mode to 64-bit mode.
583 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
584 jmp 0ffffh:0fffffffeh
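 ; (The bogus 0ffffh:0fffffffeh operand above is patched at load time by the
 ; FIX_ID_FAR32_TO_64BIT_MODE fixup recorded just above, like the other
 ; 0ffffffffh placeholders in this file.)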
585
586 ;
587 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
588BITS 64
589ALIGNCODE(16)
590NAME(IDEnter64Mode):
591 DEBUG64_CHAR('6')
592 DEBUG_CMOS_TRASH_AL 1eh
593 jmp [NAME(pICEnterTarget) wrt rip]
594
595; 64-bit jump target
596NAME(pICEnterTarget):
597FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
598dq 0ffffffffffffffffh
599
600; 64-bit pCpum address.
601NAME(pCpumIC):
602FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
603dq 0ffffffffffffffffh
604
605%ifdef VBOX_WITH_CRASHDUMP_MAGIC
606NAME(pMarker):
607db 'Switch_marker'
608%endif
609
610 ;
 611 ; When we arrive here we're in 64-bit mode in the intermediate context
612 ;
613ALIGNCODE(16)
614GLOBALNAME ICEnterTarget
615 ;DEBUG_CMOS_TRASH_AL 1fh
616 ; Load CPUM pointer into rdx
617 mov rdx, [NAME(pCpumIC) wrt rip]
618 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
619
620 mov rax, cs
621 mov ds, rax
622 mov es, rax
623
624 ; Invalidate fs & gs
625 mov rax, 0
626 mov fs, rax
627 mov gs, rax
628
629%ifdef VBOX_WITH_CRASHDUMP_MAGIC
630 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
631%endif
632
633 ; Setup stack.
634 DEBUG64_CHAR('7')
635 mov rsp, 0
636 mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
637 mov ss, ax
638 mov esp, [rdx + CPUMCPU.Hyper.esp]
639
640%ifdef VBOX_WITH_CRASHDUMP_MAGIC
641 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
642%endif
643
644%ifdef VBOX_WITH_64ON32_IDT
645 ; Set up emergency trap handlers.
646 lidt [rdx + CPUMCPU.Hyper.idtr]
647%endif
648
649 DEBUG64_S_CHAR('8')
650
651 ; Check if we need to restore the guest FPU state
652 mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
653 test esi, CPUM_SYNC_FPU_STATE
654 jz near htg_fpu_no
655
656%ifdef VBOX_WITH_CRASHDUMP_MAGIC
657 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
658%endif
659
660 mov rax, cr0
661 mov rcx, rax ; save old CR0
662 and rax, ~(X86_CR0_TS | X86_CR0_EM)
663 mov cr0, rax
664
665 mov eax, [rdx + CPUMCPU.Guest.fXStateMask]
666 mov ebx, [rdx + CPUMCPU.Guest.pXStateRC]
667 or eax, eax
668 jz htg_fpu_fxrstor
669 mov r9, rdx
670 mov edx, [rdx + CPUMCPU.Guest.fXStateMask + 4]
671 o64 xsave [rbx]
672 mov rdx, r9
673 jmp htg_fpu_done
674htg_fpu_fxrstor:
675 o64 fxrstor [rbx] ; (use explicit REX prefix, see @bugref{6398})
676htg_fpu_done:
677 mov cr0, rcx ; and restore old CR0 again
678
679 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
680
681htg_fpu_no:
682 ; Check if we need to restore the guest debug state
683 test esi, CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER
684 jz htg_debug_done
685
686%ifdef VBOX_WITH_CRASHDUMP_MAGIC
687 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
688%endif
689 test esi, CPUM_SYNC_DEBUG_REGS_HYPER
690 jnz htg_debug_hyper
691
692 ; Guest values in DRx, letting the guest access them directly.
693 mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
694 mov dr0, rax
695 mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
696 mov dr1, rax
697 mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
698 mov dr2, rax
699 mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
700 mov dr3, rax
701 mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
702 mov dr6, rax ; not required for AMD-V
703
704 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_GUEST
705 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_GUEST
706 jmp htg_debug_done
707
708htg_debug_hyper:
709 ; Combined values in DRx, intercepting all accesses.
710 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 0*8]
711 mov dr0, rax
712 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 1*8]
713 mov dr1, rax
714 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 2*8]
715 mov dr2, rax
716 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 3*8]
717 mov dr3, rax
718 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 6*8]
719 mov dr6, rax ; not required for AMD-V
720
721 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_HYPER
722 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
723
724htg_debug_done:
725
726%ifdef VBOX_WITH_CRASHDUMP_MAGIC
727 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
728%endif
729
730 ;
731 ; "Call" the specified helper function.
732 ;
733
734 ; parameter for all helper functions (pCtx)
735 DEBUG64_CHAR('9')
736 lea rsi, [rdx + CPUMCPU.Guest]
737 lea rax, [htg_return wrt rip]
738 push rax ; return address
739
740 ; load the hypervisor function address
741 mov r9, [rdx + CPUMCPU.Hyper.eip]
742 cmp r9d, HM64ON32OP_VMXRCStartVM64
743 jz NAME(VMXRCStartVM64)
744 cmp r9d, HM64ON32OP_SVMRCVMRun64
745 jz NAME(SVMRCVMRun64)
746 cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
747 jz NAME(HMRCSaveGuestFPU64)
748 cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
749 jz NAME(HMRCSaveGuestDebug64)
750 cmp r9d, HM64ON32OP_HMRCTestSwitcher64
751 jz NAME(HMRCTestSwitcher64)
752 mov eax, VERR_HM_INVALID_HM64ON32OP
753htg_return:
754 DEBUG64_CHAR('r')
755
756 ; Load CPUM pointer into rdx
757 mov rdx, [NAME(pCpumIC) wrt rip]
758 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
759
760%ifdef VBOX_WITH_CRASHDUMP_MAGIC
761 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
762%endif
763
764 ; Save the return code
765 mov dword [rdx + CPUMCPU.u32RetCode], eax
766
767 ; now let's switch back
768 jmp NAME(vmmRCToHostAsm) ; rax = returncode.
769
770ENDPROC vmmR0ToRawModeAsm
771
772
773
774
775;
776;
777; HM code (used to be HMRCA.asm at one point).
778; HM code (used to be HMRCA.asm at one point).
779; HM code (used to be HMRCA.asm at one point).
780;
781;
782
783;; @def MYPUSHSEGS
784; Macro saving all segment registers on the stack.
785; @param 1 full width register name
786%macro MYPUSHSEGS 1
787 mov %1, es
788 push %1
789 mov %1, ds
790 push %1
791%endmacro
792
793;; @def MYPOPSEGS
794; Macro restoring all segment registers on the stack
795; @param 1 full width register name
796%macro MYPOPSEGS 1
797 pop %1
798 mov ds, %1
799 pop %1
800 mov es, %1
801%endmacro
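; The two macros are used as a pair around the VMX code below, roughly:
;       MYPUSHSEGS rax          ; pushes es, then ds
;       ...                     ; VMLAUNCH/VMRESUME and guest state saving
;       MYPOPSEGS  rax          ; pops ds, then es (reverse order)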
802
803
804;/**
805; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
806; *
807; * @returns VBox status code
808; * @param HCPhysCpuPage VMXON physical address [rsp+8]
809; * @param HCPhysVmcs VMCS physical address [rsp+16]
810; * @param pCache VMCS cache [rsp+24]
811; * @param pVM The cross context VM structure. [rbp+28h]
812; * @param pVCpu The cross context virtual CPU structure. [rbp+30h]
813; * @param pCtx Guest context (rsi)
814; */
815BEGINPROC VMXRCStartVM64
816 push rbp
817 mov rbp, rsp
818 DEBUG_CMOS_STACK64 20h
819
820 ; Make sure VT-x instructions are allowed.
821 mov rax, cr4
822 or rax, X86_CR4_VMXE
823 mov cr4, rax
824
825 ; Enter VMX Root Mode.
826 vmxon [rbp + 8 + 8]
827 jnc .vmxon_success
828 mov rax, VERR_VMX_INVALID_VMXON_PTR
829 jmp .vmstart64_vmxon_failed
830
831.vmxon_success:
832 jnz .vmxon_success2
833 mov rax, VERR_VMX_VMXON_FAILED
834 jmp .vmstart64_vmxon_failed
835
836.vmxon_success2:
837 ; Activate the VMCS pointer
838 vmptrld [rbp + 16 + 8]
839 jnc .vmptrld_success
840 mov rax, VERR_VMX_INVALID_VMCS_PTR
841 jmp .vmstart64_vmxoff_end
842
843.vmptrld_success:
844 jnz .vmptrld_success2
845 mov rax, VERR_VMX_VMPTRLD_FAILED
846 jmp .vmstart64_vmxoff_end
847
848.vmptrld_success2:
849
850 ; Save the VMCS pointer on the stack
851 push qword [rbp + 16 + 8];
852
853 ; Save segment registers.
854 MYPUSHSEGS rax
855
856%ifdef VMX_USE_CACHED_VMCS_ACCESSES
857 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
858 mov rbx, [rbp + 24 + 8] ; pCache
859
860 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
861 mov qword [rbx + VMCSCACHE.uPos], 2
862 %endif
863
864 %ifdef DEBUG
865 mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
866 mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
867 mov rax, [rbp + 16 + 8] ; HCPhysVmcs
868 mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
869 mov [rbx + VMCSCACHE.TestIn.pCache], rbx
870 mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
871 %endif
872
873 mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
874 cmp ecx, 0
875 je .no_cached_writes
876 mov rdx, rcx
877 mov rcx, 0
878 jmp .cached_write
879
880ALIGN(16)
881.cached_write:
882 mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
883 vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
884 inc rcx
885 cmp rcx, rdx
886 jl .cached_write
887
888 mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
889.no_cached_writes:
890
891 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
892 mov qword [rbx + VMCSCACHE.uPos], 3
893 %endif
894 ; Save the pCache pointer.
895 push rbx
896%endif
897
898 ; Save the host state that's relevant in the temporary 64-bit mode.
899 mov rdx, cr0
900 mov eax, VMX_VMCS_HOST_CR0
901 vmwrite rax, rdx
902
903 mov rdx, cr3
904 mov eax, VMX_VMCS_HOST_CR3
905 vmwrite rax, rdx
906
907 mov rdx, cr4
908 mov eax, VMX_VMCS_HOST_CR4
909 vmwrite rax, rdx
910
911 mov rdx, cs
912 mov eax, VMX_VMCS_HOST_FIELD_CS
913 vmwrite rax, rdx
914
915 mov rdx, ss
916 mov eax, VMX_VMCS_HOST_FIELD_SS
917 vmwrite rax, rdx
918
919%if 0 ; Another experiment regarding triple faults... Seems not to be necessary.
920 sub rsp, 16
921 str [rsp]
922 movsx rdx, word [rsp]
923 mov eax, VMX_VMCS_HOST_FIELD_TR
924 vmwrite rax, rdx
925 add rsp, 16
926%endif
927
928 sub rsp, 16
929 sgdt [rsp + 6] ; (The 64-bit base should be aligned, not the word.)
930 mov eax, VMX_VMCS_HOST_GDTR_BASE
931 vmwrite rax, [rsp + 6 + 2]
932 add rsp, 16
933
934%ifdef VBOX_WITH_64ON32_IDT
935 sub rsp, 16
936 sidt [rsp + 6]
937 mov eax, VMX_VMCS_HOST_IDTR_BASE
938 vmwrite rax, [rsp + 6 + 2] ; [rsi + CPUMCPU.Hyper.idtr + 2] - why doesn't this work?
939 add rsp, 16
940 ;call NAME(vmm64On32PrintIdtr)
941%endif
942
943%ifdef VBOX_WITH_CRASHDUMP_MAGIC
944 mov qword [rbx + VMCSCACHE.uPos], 4
945%endif
946
947 ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).
948
949 ; First we have to save some final CPU context registers.
950 lea rdx, [.vmlaunch64_done wrt rip]
951 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
952 vmwrite rax, rdx
953 ; Note: assumes success!
954
955 ; Manual save and restore:
956 ; - General purpose registers except RIP, RSP
957 ; - XCR0
958 ;
959 ; Trashed:
960 ; - CR2 (we don't care)
961 ; - LDTR (reset to 0)
962 ; - DRx (presumably not changed at all)
963 ; - DR7 (reset to 0x400)
964 ; - EFLAGS (reset to RT_BIT(1); not relevant)
965
966%ifdef VBOX_WITH_CRASHDUMP_MAGIC
967 mov qword [rbx + VMCSCACHE.uPos], 5
968%endif
969
970 ;
971 ; Save the host XCR0 and load the guest one if necessary.
972 ; Note! Trashes rdx and rcx.
973 ;
974 mov rax, [rbp + 30h] ; pVCpu
975 test byte [rax + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
976 jz .xcr0_before_skip
977
978 xor ecx, ecx
979 xgetbv ; Save the host one on the stack.
980 push rdx
981 push rax
982
983 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
984 mov edx, [xSI + CPUMCTX.aXcr + 4]
985 xor ecx, ecx ; paranoia
986 xsetbv
987
988 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
989 jmp .xcr0_before_done
990
991.xcr0_before_skip:
992 push 3fh ; indicate that we need not.
993.xcr0_before_done:
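 ; The value pushed above doubles as a flag: after the VM-exit it is popped
 ; into rcx and tested; zero means "restore the host XCR0 from the two
 ; qwords pushed underneath", while the non-zero 3fh means nothing to restore.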
994
995 ; Save the pCtx pointer
996 push rsi
997
998 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
999 mov rbx, qword [rsi + CPUMCTX.cr2]
1000 mov rdx, cr2
1001 cmp rdx, rbx
1002 je .skipcr2write64
1003 mov cr2, rbx
1004
1005.skipcr2write64:
1006 mov eax, VMX_VMCS_HOST_RSP
1007 vmwrite rax, rsp
1008 ; Note: assumes success!
1009 ; Don't mess with ESP anymore!!!
1010
1011 ; Save Guest's general purpose registers.
1012 mov rax, qword [rsi + CPUMCTX.eax]
1013 mov rbx, qword [rsi + CPUMCTX.ebx]
1014 mov rcx, qword [rsi + CPUMCTX.ecx]
1015 mov rdx, qword [rsi + CPUMCTX.edx]
1016 mov rbp, qword [rsi + CPUMCTX.ebp]
1017 mov r8, qword [rsi + CPUMCTX.r8]
1018 mov r9, qword [rsi + CPUMCTX.r9]
1019 mov r10, qword [rsi + CPUMCTX.r10]
1020 mov r11, qword [rsi + CPUMCTX.r11]
1021 mov r12, qword [rsi + CPUMCTX.r12]
1022 mov r13, qword [rsi + CPUMCTX.r13]
1023 mov r14, qword [rsi + CPUMCTX.r14]
1024 mov r15, qword [rsi + CPUMCTX.r15]
1025
1026 ; Save rdi & rsi.
1027 mov rdi, qword [rsi + CPUMCTX.edi]
1028 mov rsi, qword [rsi + CPUMCTX.esi]
1029
1030 vmlaunch
 1031 jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.
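 ; On a successful VMLAUNCH execution resumes at .vmlaunch64_done below via
 ; the VMX_VMCS_HOST_RIP value written further up; the jmp above is only
 ; reached when the VMLAUNCH instruction itself fails.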
1032
1033ALIGNCODE(16)
1034.vmlaunch64_done:
1035%if 0 ;fixme later - def VBOX_WITH_64ON32_IDT
1036 push rdx
1037 mov rdx, [rsp + 8] ; pCtx
1038 lidt [rdx + CPUMCPU.Hyper.idtr]
1039 pop rdx
1040%endif
1041 jc near .vmstart64_invalid_vmcs_ptr
1042 jz near .vmstart64_start_failed
1043
1044 push rdi
1045 mov rdi, [rsp + 8] ; pCtx
1046
1047 mov qword [rdi + CPUMCTX.eax], rax
1048 mov qword [rdi + CPUMCTX.ebx], rbx
1049 mov qword [rdi + CPUMCTX.ecx], rcx
1050 mov qword [rdi + CPUMCTX.edx], rdx
1051 mov qword [rdi + CPUMCTX.esi], rsi
1052 mov qword [rdi + CPUMCTX.ebp], rbp
1053 mov qword [rdi + CPUMCTX.r8], r8
1054 mov qword [rdi + CPUMCTX.r9], r9
1055 mov qword [rdi + CPUMCTX.r10], r10
1056 mov qword [rdi + CPUMCTX.r11], r11
1057 mov qword [rdi + CPUMCTX.r12], r12
1058 mov qword [rdi + CPUMCTX.r13], r13
1059 mov qword [rdi + CPUMCTX.r14], r14
1060 mov qword [rdi + CPUMCTX.r15], r15
1061 mov rax, cr2
1062 mov qword [rdi + CPUMCTX.cr2], rax
1063
1064 pop rax ; The guest edi we pushed above
1065 mov qword [rdi + CPUMCTX.edi], rax
1066
1067 pop rsi ; pCtx (needed in rsi by the macros below)
1068
1069 ; Restore the host xcr0 if necessary.
1070 pop rcx
1071 test ecx, ecx
1072 jnz .xcr0_after_skip
1073 pop rax
1074 pop rdx
1075 xsetbv ; ecx is already zero.
1076.xcr0_after_skip:
1077
1078%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1079 pop rdi ; Saved pCache
1080
1081 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1082 mov dword [rdi + VMCSCACHE.uPos], 7
1083 %endif
1084 %ifdef DEBUG
1085 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1086 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1087 mov rax, cr8
1088 mov [rdi + VMCSCACHE.TestOut.cr8], rax
1089 %endif
1090
1091 mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
1092 cmp ecx, 0 ; Can't happen
1093 je .no_cached_reads
1094 jmp .cached_read
1095
1096ALIGN(16)
1097.cached_read:
1098 dec rcx
1099 mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
1100 vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
1101 cmp rcx, 0
1102 jnz .cached_read
1103.no_cached_reads:
1104 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1105 mov dword [rdi + VMCSCACHE.uPos], 8
1106 %endif
1107%endif
1108
1109 ; Restore segment registers.
1110 MYPOPSEGS rax
1111
1112 mov eax, VINF_SUCCESS
1113
1114%ifdef VBOX_WITH_CRASHDUMP_MAGIC
1115 mov dword [rdi + VMCSCACHE.uPos], 9
1116%endif
1117.vmstart64_end:
1118
1119%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1120 %ifdef DEBUG
1121 mov rdx, [rsp] ; HCPhysVmcs
1122 mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
1123 %endif
1124%endif
1125
1126 ; Write back the data and disable the VMCS.
1127 vmclear qword [rsp] ; Pushed pVMCS
1128 add rsp, 8
1129
1130.vmstart64_vmxoff_end:
1131 ; Disable VMX root mode.
1132 vmxoff
1133.vmstart64_vmxon_failed:
1134%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1135 %ifdef DEBUG
1136 cmp eax, VINF_SUCCESS
1137 jne .skip_flags_save
1138
1139 pushf
1140 pop rdx
1141 mov [rdi + VMCSCACHE.TestOut.eflags], rdx
1142 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1143 mov dword [rdi + VMCSCACHE.uPos], 12
1144 %endif
1145.skip_flags_save:
1146 %endif
1147%endif
1148 pop rbp
1149 ret
1150
1151
1152.vmstart64_invalid_vmcs_ptr:
1153 pop rsi ; pCtx (needed in rsi by the macros below)
1154
1155 ; Restore the host xcr0 if necessary.
1156 pop rcx
1157 test ecx, ecx
1158 jnz .xcr0_after_skip2
1159 pop rax
1160 pop rdx
1161 xsetbv ; ecx is already zero.
1162.xcr0_after_skip2:
1163
1164%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1165 pop rdi ; pCache
1166 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1167 mov dword [rdi + VMCSCACHE.uPos], 10
1168 %endif
1169
1170 %ifdef DEBUG
1171 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1172 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1173 %endif
1174%endif
1175
1176 ; Restore segment registers.
1177 MYPOPSEGS rax
1178
1179 ; Restore all general purpose host registers.
1180 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1181 jmp .vmstart64_end
1182
1183.vmstart64_start_failed:
1184 pop rsi ; pCtx (needed in rsi by the macros below)
1185
1186 ; Restore the host xcr0 if necessary.
1187 pop rcx
1188 test ecx, ecx
1189 jnz .xcr0_after_skip3
1190 pop rax
1191 pop rdx
1192 xsetbv ; ecx is already zero.
1193.xcr0_after_skip3:
1194
1195%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1196 pop rdi ; pCache
1197
1198 %ifdef DEBUG
1199 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1200 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1201 %endif
1202 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1203 mov dword [rdi + VMCSCACHE.uPos], 11
1204 %endif
1205%endif
1206
1207 ; Restore segment registers.
1208 MYPOPSEGS rax
1209
1210 ; Restore all general purpose host registers.
1211 mov eax, VERR_VMX_UNABLE_TO_START_VM
1212 jmp .vmstart64_end
1213ENDPROC VMXRCStartVM64
1214
1215
1216;;
1217; Prepares for and executes VMRUN (64-bit guests)
1218;
1219; @returns VBox status code
1220; @param HCPhysVMCBHost Physical address of host VMCB [rbp+10h]
1221; @param HCPhysVMCB Physical address of guest VMCB [rbp+18h]
1222; @param pVM The cross context VM structure. [rbp+20h]
1223; @param pVCpu The cross context virtual CPU structure. [rbp+28h]
1224; @param pCtx Guest context [rsi]
1225;
1226BEGINPROC SVMRCVMRun64
1227 push rbp
1228 mov rbp, rsp
1229 pushf
1230 DEBUG_CMOS_STACK64 30h
1231
1232 ; Manual save and restore:
1233 ; - General purpose registers except RIP, RSP, RAX
1234 ;
1235 ; Trashed:
1236 ; - CR2 (we don't care)
1237 ; - LDTR (reset to 0)
1238 ; - DRx (presumably not changed at all)
1239 ; - DR7 (reset to 0x400)
1240
1241 ;
1242 ; Save the host XCR0 and load the guest one if necessary.
1243 ;
1244 mov rax, [rbp + 28h] ; pVCpu
1245 test byte [rax + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1246 jz .xcr0_before_skip
1247
1248 xor ecx, ecx
1249 xgetbv ; Save the host one on the stack.
1250 push rdx
1251 push rax
1252
1253 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1254 mov edx, [xSI + CPUMCTX.aXcr + 4]
1255 xor ecx, ecx ; paranoia
1256 xsetbv
1257
1258 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1259 jmp .xcr0_before_done
1260
1261.xcr0_before_skip:
1262 push 3fh ; indicate that we need not.
1263.xcr0_before_done:
1264
1265 ; Save the Guest CPU context pointer.
1266 push rsi ; Push for saving the state at the end
1267
1268 ; Save host fs, gs, sysenter msr etc
 1269 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64-bit physical address)
1270 push rax ; Save for the vmload after vmrun
1271 vmsave
1272
1273 ; Setup eax for VMLOAD
 1274 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address)
1275
1276 ; Restore Guest's general purpose registers.
1277 ; rax is loaded from the VMCB by VMRUN.
1278 mov rbx, qword [rsi + CPUMCTX.ebx]
1279 mov rcx, qword [rsi + CPUMCTX.ecx]
1280 mov rdx, qword [rsi + CPUMCTX.edx]
1281 mov rdi, qword [rsi + CPUMCTX.edi]
1282 mov rbp, qword [rsi + CPUMCTX.ebp]
1283 mov r8, qword [rsi + CPUMCTX.r8]
1284 mov r9, qword [rsi + CPUMCTX.r9]
1285 mov r10, qword [rsi + CPUMCTX.r10]
1286 mov r11, qword [rsi + CPUMCTX.r11]
1287 mov r12, qword [rsi + CPUMCTX.r12]
1288 mov r13, qword [rsi + CPUMCTX.r13]
1289 mov r14, qword [rsi + CPUMCTX.r14]
1290 mov r15, qword [rsi + CPUMCTX.r15]
1291 mov rsi, qword [rsi + CPUMCTX.esi]
1292
1293 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1294 clgi
1295 sti
1296
1297 ; Load guest fs, gs, sysenter msr etc
1298 vmload
1299 ; Run the VM
1300 vmrun
1301
1302 ; rax is in the VMCB already; we can use it here.
1303
1304 ; Save guest fs, gs, sysenter msr etc.
1305 vmsave
1306
1307 ; Load host fs, gs, sysenter msr etc.
1308 pop rax ; Pushed above
1309 vmload
1310
1311 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1312 cli
1313 stgi
1314
1315 pop rax ; pCtx
1316
1317 mov qword [rax + CPUMCTX.ebx], rbx
1318 mov qword [rax + CPUMCTX.ecx], rcx
1319 mov qword [rax + CPUMCTX.edx], rdx
1320 mov qword [rax + CPUMCTX.esi], rsi
1321 mov qword [rax + CPUMCTX.edi], rdi
1322 mov qword [rax + CPUMCTX.ebp], rbp
1323 mov qword [rax + CPUMCTX.r8], r8
1324 mov qword [rax + CPUMCTX.r9], r9
1325 mov qword [rax + CPUMCTX.r10], r10
1326 mov qword [rax + CPUMCTX.r11], r11
1327 mov qword [rax + CPUMCTX.r12], r12
1328 mov qword [rax + CPUMCTX.r13], r13
1329 mov qword [rax + CPUMCTX.r14], r14
1330 mov qword [rax + CPUMCTX.r15], r15
1331
1332 ;
1333 ; Restore the host xcr0 if necessary.
1334 ;
1335 pop rcx
1336 test ecx, ecx
1337 jnz .xcr0_after_skip
1338 pop rax
1339 pop rdx
1340 xsetbv ; ecx is already zero.
1341.xcr0_after_skip:
1342
1343 mov eax, VINF_SUCCESS
1344
1345 popf
1346 pop rbp
1347 ret
1348ENDPROC SVMRCVMRun64
1349
1350;/**
1351; * Saves the guest FPU context
1352; *
1353; * @returns VBox status code
1354; * @param pCtx Guest context [rsi]
1355; */
1356BEGINPROC HMRCSaveGuestFPU64
1357 DEBUG_CMOS_STACK64 40h
1358 mov rax, cr0
1359 mov rcx, rax ; save old CR0
1360 and rax, ~(X86_CR0_TS | X86_CR0_EM)
1361 mov cr0, rax
1362
1363 mov eax, [rsi + CPUMCTX.fXStateMask]
1364 mov ebx, [rsi + CPUMCTX.pXStateRC]
1365 test eax, eax
1366 jz .use_fxsave
1367 mov edx, [rsi + CPUMCTX.fXStateMask + 4]
1368 o64 xsave [rbx]
1369 jmp .done
1370
1371.use_fxsave:
1372 o64 fxsave [rbx] ; (use explicit REX prefix, see @bugref{6398})
1373
1374.done:
1375 mov cr0, rcx ; and restore old CR0 again
1376
1377 mov eax, VINF_SUCCESS
1378 ret
1379ENDPROC HMRCSaveGuestFPU64
1380
1381;/**
1382; * Saves the guest debug context (DR0-3, DR6)
1383; *
1384; * @returns VBox status code
1385; * @param pCtx Guest context [rsi]
1386; */
1387BEGINPROC HMRCSaveGuestDebug64
1388 DEBUG_CMOS_STACK64 41h
1389 mov rax, dr0
1390 mov qword [rsi + CPUMCTX.dr + 0*8], rax
1391 mov rax, dr1
1392 mov qword [rsi + CPUMCTX.dr + 1*8], rax
1393 mov rax, dr2
1394 mov qword [rsi + CPUMCTX.dr + 2*8], rax
1395 mov rax, dr3
1396 mov qword [rsi + CPUMCTX.dr + 3*8], rax
1397 mov rax, dr6
1398 mov qword [rsi + CPUMCTX.dr + 6*8], rax
1399 mov eax, VINF_SUCCESS
1400 ret
1401ENDPROC HMRCSaveGuestDebug64
1402
1403;/**
1404; * Dummy callback handler
1405; *
1406; * @returns VBox status code
1407; * @param param1 Parameter 1 [rsp+8]
1408; * @param param2 Parameter 2 [rsp+12]
1409; * @param param3 Parameter 3 [rsp+16]
1410; * @param param4 Parameter 4 [rsp+20]
1411; * @param param5 Parameter 5 [rsp+24]
1412; * @param pCtx Guest context [rsi]
1413; */
1414BEGINPROC HMRCTestSwitcher64
1415 DEBUG_CMOS_STACK64 42h
1416 mov eax, [rsp+8]
1417 ret
1418ENDPROC HMRCTestSwitcher64
1419
1420
1421%ifdef VBOX_WITH_64ON32_IDT
1422;
1423; Trap handling.
1424;
1425
1426;; Here follows an array of trap handler entry points, each 8 bytes in size.
1427BEGINPROC vmm64On32TrapHandlers
1428%macro vmm64On32TrapEntry 1
1429GLOBALNAME vmm64On32Trap %+ i
 1430 db 06ah, i ; push imm8 - note that this is a sign-extended value.
1431 jmp NAME(%1)
1432 ALIGNCODE(8)
1433%assign i i+1
1434%endmacro
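; Each stub is thus a "push imm8" of the trap number followed by a jmp to
; the common handler, padded to 8 bytes by the ALIGNCODE(8); the IDT built
; at vmm64On32Idt further down points at these fixed-size entries.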
1435%assign i 0 ; start counter.
1436 vmm64On32TrapEntry vmm64On32Trap ; 0
1437 vmm64On32TrapEntry vmm64On32Trap ; 1
1438 vmm64On32TrapEntry vmm64On32Trap ; 2
1439 vmm64On32TrapEntry vmm64On32Trap ; 3
1440 vmm64On32TrapEntry vmm64On32Trap ; 4
1441 vmm64On32TrapEntry vmm64On32Trap ; 5
1442 vmm64On32TrapEntry vmm64On32Trap ; 6
1443 vmm64On32TrapEntry vmm64On32Trap ; 7
1444 vmm64On32TrapEntry vmm64On32TrapErrCode ; 8
1445 vmm64On32TrapEntry vmm64On32Trap ; 9
1446 vmm64On32TrapEntry vmm64On32TrapErrCode ; a
1447 vmm64On32TrapEntry vmm64On32TrapErrCode ; b
1448 vmm64On32TrapEntry vmm64On32TrapErrCode ; c
1449 vmm64On32TrapEntry vmm64On32TrapErrCode ; d
1450 vmm64On32TrapEntry vmm64On32TrapErrCode ; e
1451 vmm64On32TrapEntry vmm64On32Trap ; f (reserved)
1452 vmm64On32TrapEntry vmm64On32Trap ; 10
1453 vmm64On32TrapEntry vmm64On32TrapErrCode ; 11
1454 vmm64On32TrapEntry vmm64On32Trap ; 12
1455 vmm64On32TrapEntry vmm64On32Trap ; 13
1456%rep (0x100 - 0x14)
1457 vmm64On32TrapEntry vmm64On32Trap
1458%endrep
1459ENDPROC vmm64On32TrapHandlers
1460
1461;; Fake an error code and jump to the real thing.
1462BEGINPROC vmm64On32Trap
1463 push qword [rsp]
1464 jmp NAME(vmm64On32TrapErrCode)
1465ENDPROC vmm64On32Trap
1466
1467
1468;;
1469; Trap frame:
1470; [rbp + 38h] = ss
1471; [rbp + 30h] = rsp
1472; [rbp + 28h] = eflags
1473; [rbp + 20h] = cs
1474; [rbp + 18h] = rip
1475; [rbp + 10h] = error code (or trap number)
1476; [rbp + 08h] = trap number
1477; [rbp + 00h] = rbp
1478; [rbp - 08h] = rax
1479; [rbp - 10h] = rbx
1480; [rbp - 18h] = ds
1481;
1482BEGINPROC vmm64On32TrapErrCode
1483 push rbp
1484 mov rbp, rsp
1485 push rax
1486 push rbx
1487 mov ax, ds
1488 push rax
1489 sub rsp, 20h
1490
1491 mov ax, cs
1492 mov ds, ax
1493
1494%if 1
1495 COM64_S_NEWLINE
1496 COM64_S_CHAR '!'
1497 COM64_S_CHAR 't'
1498 COM64_S_CHAR 'r'
1499 COM64_S_CHAR 'a'
1500 COM64_S_CHAR 'p'
1501 movzx eax, byte [rbp + 08h]
1502 COM64_S_DWORD_REG eax
1503 COM64_S_CHAR '!'
1504%endif
1505
1506%if 0 ;; @todo Figure the offset of the CPUMCPU relative to CPUM
1507 sidt [rsp]
1508 movsx eax, word [rsp]
1509 shr eax, 12 ; div by 16 * 256 (0x1000).
1510%else
1511 ; hardcoded VCPU(0) for now...
1512 mov rbx, [NAME(pCpumIC) wrt rip]
1513 mov eax, [rbx + CPUM.offCPUMCPU0]
1514%endif
1515 push rax ; Save the offset for rbp later.
1516
1517 add rbx, rax ; rbx = CPUMCPU
1518
1519 ;
1520 ; Deal with recursive traps due to vmxoff (lazy bird).
1521 ;
1522 lea rax, [.vmxoff_trap_location wrt rip]
1523 cmp rax, [rbp + 18h]
1524 je .not_vmx_root
1525
1526 ;
1527 ; Save the context.
1528 ;
1529 mov rax, [rbp - 8]
1530 mov [rbx + CPUMCPU.Hyper.eax], rax
1531 mov [rbx + CPUMCPU.Hyper.ecx], rcx
1532 mov [rbx + CPUMCPU.Hyper.edx], rdx
1533 mov rax, [rbp - 10h]
1534 mov [rbx + CPUMCPU.Hyper.ebx], rax
1535 mov rax, [rbp]
1536 mov [rbx + CPUMCPU.Hyper.ebp], rax
1537 mov rax, [rbp + 30h]
1538 mov [rbx + CPUMCPU.Hyper.esp], rax
1539 mov [rbx + CPUMCPU.Hyper.edi], rdi
1540 mov [rbx + CPUMCPU.Hyper.esi], rsi
1541 mov [rbx + CPUMCPU.Hyper.r8], r8
1542 mov [rbx + CPUMCPU.Hyper.r9], r9
1543 mov [rbx + CPUMCPU.Hyper.r10], r10
1544 mov [rbx + CPUMCPU.Hyper.r11], r11
1545 mov [rbx + CPUMCPU.Hyper.r12], r12
1546 mov [rbx + CPUMCPU.Hyper.r13], r13
1547 mov [rbx + CPUMCPU.Hyper.r14], r14
1548 mov [rbx + CPUMCPU.Hyper.r15], r15
1549
1550 mov rax, [rbp + 18h]
1551 mov [rbx + CPUMCPU.Hyper.eip], rax
1552 movzx ax, [rbp + 20h]
1553 mov [rbx + CPUMCPU.Hyper.cs.Sel], ax
1554 mov ax, [rbp + 38h]
1555 mov [rbx + CPUMCPU.Hyper.ss.Sel], ax
1556 mov ax, [rbp - 18h]
1557 mov [rbx + CPUMCPU.Hyper.ds.Sel], ax
1558
1559 mov rax, [rbp + 28h]
1560 mov [rbx + CPUMCPU.Hyper.eflags], rax
1561
1562 mov rax, cr2
1563 mov [rbx + CPUMCPU.Hyper.cr2], rax
1564
1565 mov rax, [rbp + 10h]
1566 mov [rbx + CPUMCPU.Hyper.r14], rax ; r14 = error code
1567 movzx eax, byte [rbp + 08h]
1568 mov [rbx + CPUMCPU.Hyper.r15], rax ; r15 = trap number
1569
1570 ;
1571 ; Finally, leave VMX root operation before trying to return to the host.
1572 ;
1573 mov rax, cr4
1574 test rax, X86_CR4_VMXE
1575 jz .not_vmx_root
1576.vmxoff_trap_location:
1577 vmxoff
1578.not_vmx_root:
1579
1580 ;
1581 ; Go back to the host.
1582 ;
1583 pop rbp
1584 mov dword [rbx + CPUMCPU.u32RetCode], VERR_TRPM_DONT_PANIC
1585 jmp NAME(vmmRCToHostAsm)
1586ENDPROC vmm64On32TrapErrCode
1587
1588;; We allocate the IDT here to avoid having to allocate memory separately somewhere.
1589ALIGNCODE(16)
1590GLOBALNAME vmm64On32Idt
1591%assign i 0
1592%rep 256
1593 dq NAME(vmm64On32Trap %+ i) - NAME(Start) ; Relative trap handler offsets.
1594 dq 0
1595%assign i (i + 1)
1596%endrep
1597
1598
1599 %if 0
1600;; For debugging purposes.
1601BEGINPROC vmm64On32PrintIdtr
1602 push rax
1603 push rsi ; paranoia
1604 push rdi ; ditto
1605 sub rsp, 16
1606
1607 COM64_S_CHAR ';'
1608 COM64_S_CHAR 'i'
1609 COM64_S_CHAR 'd'
1610 COM64_S_CHAR 't'
1611 COM64_S_CHAR 'r'
1612 COM64_S_CHAR '='
1613 sidt [rsp + 6]
1614 mov eax, [rsp + 8 + 4]
1615 COM64_S_DWORD_REG eax
1616 mov eax, [rsp + 8]
1617 COM64_S_DWORD_REG eax
1618 COM64_S_CHAR ':'
1619 movzx eax, word [rsp + 6]
1620 COM64_S_DWORD_REG eax
1621 COM64_S_CHAR '!'
1622
1623 add rsp, 16
1624 pop rdi
1625 pop rsi
1626 pop rax
1627 ret
1628ENDPROC vmm64On32PrintIdtr
1629 %endif
1630
1631 %if 1
1632;; For debugging purposes.
1633BEGINPROC vmm64On32DumpCmos
1634 push rax
1635 push rdx
1636 push rcx
1637 push rsi ; paranoia
1638 push rdi ; ditto
1639 sub rsp, 16
1640
1641%if 0
1642 mov al, 3
1643 out 72h, al
1644 mov al, 68h
1645 out 73h, al
1646%endif
1647
1648 COM64_S_NEWLINE
1649 COM64_S_CHAR 'c'
1650 COM64_S_CHAR 'm'
1651 COM64_S_CHAR 'o'
1652 COM64_S_CHAR 's'
1653 COM64_S_CHAR '0'
1654 COM64_S_CHAR ':'
1655
1656 xor ecx, ecx
1657.loop1:
1658 mov al, cl
1659 out 70h, al
1660 in al, 71h
1661 COM64_S_BYTE_REG eax
1662 COM64_S_CHAR ' '
1663 inc ecx
1664 cmp ecx, 128
1665 jb .loop1
1666
1667 COM64_S_NEWLINE
1668 COM64_S_CHAR 'c'
1669 COM64_S_CHAR 'm'
1670 COM64_S_CHAR 'o'
1671 COM64_S_CHAR 's'
1672 COM64_S_CHAR '1'
1673 COM64_S_CHAR ':'
1674 xor ecx, ecx
1675.loop2:
1676 mov al, cl
1677 out 72h, al
1678 in al, 73h
1679 COM64_S_BYTE_REG eax
1680 COM64_S_CHAR ' '
1681 inc ecx
1682 cmp ecx, 128
1683 jb .loop2
1684
1685%if 0
1686 COM64_S_NEWLINE
1687 COM64_S_CHAR 'c'
1688 COM64_S_CHAR 'm'
1689 COM64_S_CHAR 'o'
1690 COM64_S_CHAR 's'
1691 COM64_S_CHAR '2'
1692 COM64_S_CHAR ':'
1693 xor ecx, ecx
1694.loop3:
1695 mov al, cl
1696 out 74h, al
1697 in al, 75h
1698 COM64_S_BYTE_REG eax
1699 COM64_S_CHAR ' '
1700 inc ecx
1701 cmp ecx, 128
1702 jb .loop3
1703
1704 COM64_S_NEWLINE
1705 COM64_S_CHAR 'c'
1706 COM64_S_CHAR 'm'
1707 COM64_S_CHAR 'o'
1708 COM64_S_CHAR 's'
1709 COM64_S_CHAR '3'
1710 COM64_S_CHAR ':'
1711 xor ecx, ecx
1712.loop4:
1713 mov al, cl
1714 out 72h, al
1715 in al, 73h
1716 COM64_S_BYTE_REG eax
1717 COM64_S_CHAR ' '
1718 inc ecx
1719 cmp ecx, 128
1720 jb .loop4
1721
1722 COM64_S_NEWLINE
1723%endif
1724
1725 add rsp, 16
1726 pop rdi
1727 pop rsi
1728 pop rcx
1729 pop rdx
1730 pop rax
1731 ret
1732ENDPROC vmm64On32DumpCmos
1733 %endif
1734
1735%endif ; VBOX_WITH_64ON32_IDT
1736
1737
1738
1739;
1740;
1741; Back to switcher code.
1742; Back to switcher code.
1743; Back to switcher code.
1744;
1745;
1746
1747
1748
1749;;
1750; Trampoline for doing a call when starting the hypervisor execution.
1751;
1752; Push any arguments to the routine.
1753; Push the argument frame size (cArg * 4).
1754; Push the call target (_cdecl convention).
1755; Push the address of this routine.
1756;
1757;
1758BITS 64
1759ALIGNCODE(16)
1760BEGINPROC vmmRCCallTrampoline
1761%ifdef DEBUG_STUFF
1762 COM64_S_CHAR 'c'
1763 COM64_S_CHAR 't'
1764 COM64_S_CHAR '!'
1765%endif
1766 int3
1767ENDPROC vmmRCCallTrampoline
1768
1769
1770;;
1771; The C interface.
1772;
1773BITS 64
1774ALIGNCODE(16)
1775BEGINPROC vmmRCToHost
1776%ifdef DEBUG_STUFF
1777 push rsi
1778 COM_NEWLINE
1779 COM_CHAR 'b'
1780 COM_CHAR 'a'
1781 COM_CHAR 'c'
1782 COM_CHAR 'k'
1783 COM_CHAR '!'
1784 COM_NEWLINE
1785 pop rsi
1786%endif
1787 int3
1788ENDPROC vmmRCToHost
1789
1790;;
1791; vmmRCToHostAsm
1792;
1793; This is an alternative entry point which we'll be using
1794; when we have saved the guest state already or we haven't
1795; been messing with the guest at all.
1796;
1797; @param rbp The virtual cpu number.
1798; @param
1799;
1800BITS 64
1801ALIGNCODE(16)
1802BEGINPROC vmmRCToHostAsm
1803NAME(vmmRCToHostAsmNoReturn):
1804 ;; We're still in the intermediate memory context!
1805
1806 ;;
1807 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
1808 ;;
1809 jmp far [NAME(fpIDEnterTarget) wrt rip]
1810
1811; 16:32 Pointer to IDEnterTarget.
1812NAME(fpIDEnterTarget):
1813 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
1814dd 0
1815 FIXUP FIX_HYPER_CS, 0
1816dd 0
1817
1818 ; We're now on identity mapped pages!
1819ALIGNCODE(16)
1820GLOBALNAME IDExitTarget
1821BITS 32
1822 DEBUG32_CHAR('1')
1823
1824 ; 1. Deactivate long mode by turning off paging.
1825 mov ebx, cr0
1826 and ebx, ~X86_CR0_PG
1827 mov cr0, ebx
1828 DEBUG32_CHAR('2')
1829
1830 ; 2. Load intermediate page table.
1831 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
1832 mov edx, 0ffffffffh
1833 mov cr3, edx
1834 DEBUG32_CHAR('3')
1835
1836 ; 3. Disable long mode.
1837 mov ecx, MSR_K6_EFER
1838 rdmsr
1839 DEBUG32_CHAR('5')
1840 and eax, ~(MSR_K6_EFER_LME)
1841 wrmsr
1842 DEBUG32_CHAR('6')
1843
1844%ifndef NEED_PAE_ON_HOST
1845 ; 3b. Disable PAE.
1846 mov eax, cr4
1847 and eax, ~X86_CR4_PAE
1848 mov cr4, eax
1849 DEBUG32_CHAR('7')
1850%endif
1851
1852 ; 4. Enable paging.
1853 or ebx, X86_CR0_PG
1854 mov cr0, ebx
1855 jmp short just_a_jump
1856just_a_jump:
1857 DEBUG32_CHAR('8')
1858
1859 ;;
1860 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
1861 ;;
1862 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
1863 jmp near NAME(ICExitTarget)
1864
1865 ;;
1866 ;; When we arrive at this label we're at the host mapping of the
1867 ;; switcher code, but with intermediate page tables.
1868 ;;
1869BITS 32
1870ALIGNCODE(16)
1871GLOBALNAME ICExitTarget
1872 DEBUG32_CHAR('9')
1873 ;DEBUG_CMOS_TRASH_AL 70h
1874
1875 ; load the hypervisor data selector into ds & es
1876 FIXUP FIX_HYPER_DS, 1
1877 mov eax, 0ffffh
1878 mov ds, eax
1879 mov es, eax
1880 DEBUG32_CHAR('a')
1881
1882 FIXUP FIX_GC_CPUM_OFF, 1, 0
1883 mov edx, 0ffffffffh
1884 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1885
1886 DEBUG32_CHAR('b')
1887 mov esi, [edx + CPUMCPU.Host.cr3]
1888 mov cr3, esi
1889 DEBUG32_CHAR('c')
1890
1891 ;; now we're in host memory context, let's restore regs
1892 FIXUP FIX_HC_CPUM_OFF, 1, 0
1893 mov edx, 0ffffffffh
1894 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1895 DEBUG32_CHAR('e')
1896
1897 ; restore the host EFER
1898 mov ebx, edx
1899 mov ecx, MSR_K6_EFER
1900 mov eax, [ebx + CPUMCPU.Host.efer]
1901 mov edx, [ebx + CPUMCPU.Host.efer + 4]
1902 DEBUG32_CHAR('f')
1903 wrmsr
1904 mov edx, ebx
1905 DEBUG32_CHAR('g')
1906
1907 ; activate host gdt and idt
1908 lgdt [edx + CPUMCPU.Host.gdtr]
1909 DEBUG32_CHAR('0')
1910 lidt [edx + CPUMCPU.Host.idtr]
1911 DEBUG32_CHAR('1')
1912
1913 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1914 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1915 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
1916 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1917 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1918 and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
1919 ltr word [edx + CPUMCPU.Host.tr]
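 ; (ltr raises #GP if the TSS descriptor is still marked busy, hence the
 ; explicit clearing of the busy bit just above.)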
1920
1921 ; activate ldt
1922 DEBUG32_CHAR('2')
1923 lldt [edx + CPUMCPU.Host.ldtr]
1924
1925 ; Restore segment registers
1926 mov eax, [edx + CPUMCPU.Host.ds]
1927 mov ds, eax
1928 mov eax, [edx + CPUMCPU.Host.es]
1929 mov es, eax
1930 mov eax, [edx + CPUMCPU.Host.fs]
1931 mov fs, eax
1932 mov eax, [edx + CPUMCPU.Host.gs]
1933 mov gs, eax
1934 ; restore stack
1935 lss esp, [edx + CPUMCPU.Host.esp]
1936
1937 ; Control registers.
1938 mov ecx, [edx + CPUMCPU.Host.cr4]
1939 mov cr4, ecx
1940 mov ecx, [edx + CPUMCPU.Host.cr0]
1941 mov cr0, ecx
 1942 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
1943 ;mov cr2, ecx
1944
1945 ; restore general registers.
1946 mov edi, [edx + CPUMCPU.Host.edi]
1947 mov esi, [edx + CPUMCPU.Host.esi]
1948 mov ebx, [edx + CPUMCPU.Host.ebx]
1949 mov ebp, [edx + CPUMCPU.Host.ebp]
1950
1951 ; store the return code in eax
1952 DEBUG_CMOS_TRASH_AL 79h
1953 mov eax, [edx + CPUMCPU.u32RetCode]
1954 retf
1955ENDPROC vmmRCToHostAsm
1956
1957
1958GLOBALNAME End
1959;
1960; The description string (in the text section).
1961;
1962NAME(Description):
1963 db SWITCHER_DESCRIPTION
1964 db 0
1965
1966extern NAME(Relocate)
1967
1968;
1969; End the fixup records.
1970;
1971BEGINDATA
1972 db FIX_THE_END ; final entry.
1973GLOBALNAME FixupsEnd
1974
1975;;
1976; The switcher definition structure.
1977ALIGNDATA(16)
1978GLOBALNAME Def
1979 istruc VMMSWITCHERDEF
1980 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1981 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1982 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1983 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1984 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1985 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1986 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1987 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1988 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1989 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1990 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1991 ; disasm help
1992 at VMMSWITCHERDEF.offHCCode0, dd 0
1993 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1994 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
1995 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
1996 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1997 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
1998 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1999 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
2000%ifdef VBOX_WITH_64ON32_IDT ; Hack! Use offGCCode to find the IDT.
2001 at VMMSWITCHERDEF.offGCCode, dd NAME(vmm64On32Idt) - NAME(Start)
2002%else
2003 at VMMSWITCHERDEF.offGCCode, dd 0
2004%endif
2005 at VMMSWITCHERDEF.cbGCCode, dd 0
2006
2007 iend
2008