VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac@55048

Last change on this file since 55048 was 55048, checked in by vboxsync, 10 years ago

VMM,REM: Allocate the FPU/SSE/AVX/FUTURE state stuff. We need to use pointers to substates anyway and this will make CPUMCPU much smaller.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 50.5 KB
1; $Id: LegacyandAMD64.mac 55048 2015-03-31 18:49:19Z vboxsync $
2;; @file
3; VMM - World Switchers, 32-bit to AMD64 intermediate context.
4;
5; This is used for running a 64-bit guest on 32-bit hosts, not
6; normal raw-mode. All the code involved is contained in this
7; file.
8;
9
10;
11; Copyright (C) 2006-2013 Oracle Corporation
12;
13; This file is part of VirtualBox Open Source Edition (OSE), as
14; available from http://www.virtualbox.org. This file is free software;
15; you can redistribute it and/or modify it under the terms of the GNU
16; General Public License (GPL) as published by the Free Software
17; Foundation, in version 2 as it comes in the "COPYING" file of the
18; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20;
21
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26;; @note These values are from the HM64ON32OP enum in hm.h.
27%define HM64ON32OP_VMXRCStartVM64 1
28%define HM64ON32OP_SVMRCVMRun64 2
29%define HM64ON32OP_HMRCSaveGuestFPU64 3
30%define HM64ON32OP_HMRCSaveGuestDebug64 4
31%define HM64ON32OP_HMRCTestSwitcher64 5
32
33;;
34; This macro is used for storing a debug code in a CMOS location.
35;
36; If we triple fault or something, the debug code can be retrieved and we
37; might have a clue as to where the problem occurred. The code is currently
38; using CMOS register 3 in the 2nd bank as this _seems_ to be unused on my
39; Extreme4 X79 ASRock mainboard.
40;
41; @param %1 The debug code (byte)
42; @note Trashes AL.
43;
44%macro DEBUG_CMOS_TRASH_AL 1
45%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
46 mov al, 3
47 out 72h, al
48 mov al, %1
49 out 73h, al
50 in al, 73h
51%endif
52%endmacro
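; A minimal read-back sketch (host/debugger side, not part of the switcher):
; selecting index 3 in the 2nd CMOS bank and reading the data port recovers
; the last code stored by DEBUG_CMOS_TRASH_AL above.
;
;       mov     al, 3           ; 2nd bank index port
;       out     72h, al
;       in      al, 73h         ; al = last debug code written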
53
54;;
55; Version of DEBUG_CMOS_TRASH_AL that saves AL on the stack and therefore
56; doesn't trash any registers.
57;
58%macro DEBUG_CMOS_STACK64 1
59%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
60 push rax
61 DEBUG_CMOS_TRASH_AL %1
62 pop rax
63%endif
64%endmacro
65
66;;
67; Version of DEBUG_CMOS_TRASH_AL that saves AL on the stack and therefore
68; doesn't trash any registers.
69;
70%macro DEBUG_CMOS_STACK32 1
71%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
72 push eax
73 DEBUG_CMOS_TRASH_AL %1
74 pop eax
75%endif
76%endmacro
77
78
79;; Stubs for making OS/2 compile (though not work).
80%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
81 %macro vmwrite 2,
82 int3
83 %endmacro
84 %define vmlaunch int3
85 %define vmresume int3
86 %define vmsave int3
87 %define vmload int3
88 %define vmrun int3
89 %define clgi int3
90 %define stgi int3
91 %macro invlpga 2,
92 int3
93 %endmacro
94%endif
95
96;; Debug options
97;%define DEBUG_STUFF 1
98;%define STRICT_IF 1
99
100
101;*******************************************************************************
102;* Header Files *
103;*******************************************************************************
104%include "VBox/asmdefs.mac"
105%include "iprt/x86.mac"
106%include "VBox/err.mac"
107%include "VBox/apic.mac"
108
109%include "VBox/vmm/cpum.mac"
110%include "VBox/vmm/stam.mac"
111%include "VBox/vmm/vm.mac"
112%include "VBox/vmm/hm_vmx.mac"
113%include "CPUMInternal.mac"
114%include "HMInternal.mac"
115%include "VMMSwitcher.mac"
116
117
118;
119; Start the fixup records
120; We collect the fixups in the .data section as we go along
121; It is therefore VITAL that no-one is using the .data section
122; for anything else between 'Start' and 'End'.
123;
124BEGINDATA
125GLOBALNAME Fixups
126
127
128
129BEGINCODE
130GLOBALNAME Start
131
132BITS 32
133
134;;
135; The C interface.
136; @param [esp + 04h] Param 1 - VM handle
137; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
138; structure for the calling EMT.
139;
140BEGINPROC vmmR0ToRawMode
141%ifdef DEBUG_STUFF
142 COM32_S_NEWLINE
143 COM32_S_CHAR '^'
144%endif
145
146%ifdef VBOX_WITH_STATISTICS
147 ;
148 ; Switcher stats.
149 ;
150 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
151 mov edx, 0ffffffffh
152 STAM_PROFILE_ADV_START edx
153%endif
154
155 push ebp
156 mov ebp, [esp + 12] ; CPUMCPU offset
157
158 ; turn off interrupts
159 pushf
160 cli
161 ;DEBUG_CMOS_STACK32 10h
162
163 ;
164 ; Call worker.
165 ;
166 FIXUP FIX_HC_CPUM_OFF, 1, 0
167 mov edx, 0ffffffffh
168 push cs ; allow for far return and restore cs correctly.
169 call NAME(vmmR0ToRawModeAsm)
170
171%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
172 ; Restore blocked Local APIC NMI vectors
173 ; Do this here to ensure the host CS is already restored
174 mov ecx, [edx + CPUMCPU.fApicDisVectors]
175 test ecx, ecx
176 jz gth_apic_done
177 cmp byte [edx + CPUMCPU.fX2Apic], 1
178 je gth_x2apic
179
180 ; Legacy xAPIC mode:
181 mov edx, [edx + CPUMCPU.pvApicBase]
182 shr ecx, 1
183 jnc gth_nolint0
184 and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
185gth_nolint0:
186 shr ecx, 1
187 jnc gth_nolint1
188 and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
189gth_nolint1:
190 shr ecx, 1
191 jnc gth_nopc
192 and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
193gth_nopc:
194 shr ecx, 1
195 jnc gth_notherm
196 and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
197gth_notherm:
198 shr ecx, 1
199 jnc gth_nocmci
200 and dword [edx + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
201gth_nocmci:
202 jmp gth_apic_done
203
204 ; x2APIC mode:
205gth_x2apic:
206 ;DEBUG_CMOS_STACK32 7ch
207 push eax ; save eax
208 push ebx ; save it for fApicDisVectors
209 push edx ; save edx just in case.
210 mov ebx, ecx ; ebx = fApicDisVectors, ecx free for MSR use
211 shr ebx, 1
212 jnc gth_x2_nolint0
213 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
214 rdmsr
215 and eax, ~APIC_REG_LVT_MASKED
216 wrmsr
217gth_x2_nolint0:
218 shr ebx, 1
219 jnc gth_x2_nolint1
220 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
221 rdmsr
222 and eax, ~APIC_REG_LVT_MASKED
223 wrmsr
224gth_x2_nolint1:
225 shr ebx, 1
226 jnc gth_x2_nopc
227 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
228 rdmsr
229 and eax, ~APIC_REG_LVT_MASKED
230 wrmsr
231gth_x2_nopc:
232 shr ebx, 1
233 jnc gth_x2_notherm
234 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
235 rdmsr
236 and eax, ~APIC_REG_LVT_MASKED
237 wrmsr
238gth_x2_notherm:
239 shr ebx, 1
240 jnc gth_x2_nocmci
241 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
242 rdmsr
243 and eax, ~APIC_REG_LVT_MASKED
244 wrmsr
245gth_x2_nocmci:
246 pop edx
247 pop ebx
248 pop eax
249
250gth_apic_done:
251%endif
252
253 ; restore original flags
254 ;DEBUG_CMOS_STACK32 7eh
255 popf
256 pop ebp
257
258%ifdef VBOX_WITH_STATISTICS
259 ;
260 ; Switcher stats.
261 ;
262 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
263 mov edx, 0ffffffffh
264 STAM_PROFILE_ADV_STOP edx
265%endif
266
267 ;DEBUG_CMOS_STACK32 7fh
268 ret
269
270ENDPROC vmmR0ToRawMode
271
272; *****************************************************************************
273; vmmR0ToRawModeAsm
274;
275; Phase one of the switch from host to guest context (host MMU context)
276;
277; INPUT:
278; - edx virtual address of CPUM structure (valid in host context)
279; - ebp offset of the CPUMCPU structure relative to CPUM.
280;
281; USES/DESTROYS:
282; - eax, ecx, edx, esi
283;
284; ASSUMPTION:
285; - current CS and DS selectors are wide open
286;
287; *****************************************************************************
288ALIGNCODE(16)
289BEGINPROC vmmR0ToRawModeAsm
290 ;;
291 ;; Save CPU host context
292 ;; Skip eax, edx and ecx as these are not preserved over calls.
293 ;;
294 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
295%ifdef VBOX_WITH_CRASHDUMP_MAGIC
296 ; phys address of scratch page
297 mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
298 mov cr2, eax
299
300 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
301%endif
302
303 ; general registers.
304 mov [edx + CPUMCPU.Host.ebx], ebx
305 mov [edx + CPUMCPU.Host.edi], edi
306 mov [edx + CPUMCPU.Host.esi], esi
307 mov [edx + CPUMCPU.Host.esp], esp
308 mov [edx + CPUMCPU.Host.ebp], ebp ; offCpumCpu!
309 ; selectors.
310 mov [edx + CPUMCPU.Host.ds], ds
311 mov [edx + CPUMCPU.Host.es], es
312 mov [edx + CPUMCPU.Host.fs], fs
313 mov [edx + CPUMCPU.Host.gs], gs
314 mov [edx + CPUMCPU.Host.ss], ss
315 ; special registers.
316 DEBUG32_S_CHAR('s')
317 DEBUG32_S_CHAR(';')
318 sldt [edx + CPUMCPU.Host.ldtr]
319 sidt [edx + CPUMCPU.Host.idtr]
320 sgdt [edx + CPUMCPU.Host.gdtr]
321 str [edx + CPUMCPU.Host.tr]
322
323%ifdef VBOX_WITH_CRASHDUMP_MAGIC
324 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
325%endif
326
327%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
328 ; Block Local APIC NMI vectors
329 DEBUG32_S_CHAR('f')
330 DEBUG32_S_CHAR(';')
331 cmp byte [edx + CPUMCPU.fX2Apic], 1
332 je htg_x2apic
333
334 ; Legacy xAPIC mode. No write completion required when writing to the
335 ; LVT registers as we have mapped the APIC pages as non-cacheable and
336 ; the MMIO is CPU-local.
337 mov ebx, [edx + CPUMCPU.pvApicBase]
338 or ebx, ebx
339 jz htg_apic_done
340 mov eax, [ebx + APIC_REG_LVT_LINT0]
341 mov ecx, eax
342 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
343 cmp ecx, APIC_REG_LVT_MODE_NMI
344 jne htg_nolint0
345 or edi, 0x01
346 or eax, APIC_REG_LVT_MASKED
347 mov [ebx + APIC_REG_LVT_LINT0], eax
348htg_nolint0:
349 mov eax, [ebx + APIC_REG_LVT_LINT1]
350 mov ecx, eax
351 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
352 cmp ecx, APIC_REG_LVT_MODE_NMI
353 jne htg_nolint1
354 or edi, 0x02
355 or eax, APIC_REG_LVT_MASKED
356 mov [ebx + APIC_REG_LVT_LINT1], eax
357htg_nolint1:
358 mov eax, [ebx + APIC_REG_LVT_PC]
359 mov ecx, eax
360 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
361 cmp ecx, APIC_REG_LVT_MODE_NMI
362 jne htg_nopc
363 or edi, 0x04
364 or eax, APIC_REG_LVT_MASKED
365 mov [ebx + APIC_REG_LVT_PC], eax
366htg_nopc:
367 mov eax, [ebx + APIC_REG_VERSION]
368 shr eax, 16
369 cmp al, 5
370 jb htg_notherm
371 je htg_nocmci
372 mov eax, [ebx + APIC_REG_LVT_CMCI]
373 mov ecx, eax
374 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
375 cmp ecx, APIC_REG_LVT_MODE_NMI
376 jne htg_nocmci
377 or edi, 0x10
378 or eax, APIC_REG_LVT_MASKED
379 mov [ebx + APIC_REG_LVT_CMCI], eax
380htg_nocmci:
381 mov eax, [ebx + APIC_REG_LVT_THMR]
382 mov ecx, eax
383 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
384 cmp ecx, APIC_REG_LVT_MODE_NMI
385 jne htg_notherm
386 or edi, 0x08
387 or eax, APIC_REG_LVT_MASKED
388 mov [ebx + APIC_REG_LVT_THMR], eax
389htg_notherm:
390 mov [edx + CPUMCPU.fApicDisVectors], edi
391 jmp htg_apic_done
392
393 ; x2APIC mode:
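 ; Note: each 16-byte xAPIC MMIO register maps to a single x2APIC MSR at
 ; MSR_IA32_X2APIC_START + (MMIO offset >> 4), hence the '>> 4' applied to
 ; the APIC_REG_LVT_* offsets below.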
394htg_x2apic:
395 mov esi, edx ; Save edx.
396 xor edi, edi ; fApicDisVectors
397
398 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
399 rdmsr
400 mov ebx, eax
401 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
402 cmp ebx, APIC_REG_LVT_MODE_NMI
403 jne htg_x2_nolint0
404 or edi, 0x01
405 or eax, APIC_REG_LVT_MASKED
406 wrmsr
407htg_x2_nolint0:
408 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
409 rdmsr
410 mov ebx, eax
411 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
412 cmp ebx, APIC_REG_LVT_MODE_NMI
413 jne htg_x2_nolint1
414 or edi, 0x02
415 or eax, APIC_REG_LVT_MASKED
416 wrmsr
417htg_x2_nolint1:
418 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
419 rdmsr
420 mov ebx, eax
421 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
422 cmp ebx, APIC_REG_LVT_MODE_NMI
423 jne htg_x2_nopc
424 or edi, 0x04
425 or eax, APIC_REG_LVT_MASKED
426 wrmsr
427htg_x2_nopc:
428 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
429 rdmsr
430 shr eax, 16
431 cmp al, 5
432 jb htg_x2_notherm
433 je htg_x2_nocmci
434 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
435 rdmsr
436 mov ebx, eax
437 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
438 cmp ebx, APIC_REG_LVT_MODE_NMI
439 jne htg_x2_nocmci
440 or edi, 0x10
441 or eax, APIC_REG_LVT_MASKED
442 wrmsr
443htg_x2_nocmci:
444 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
445 rdmsr
446 mov ebx, eax
447 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
448 cmp ebx, APIC_REG_LVT_MODE_NMI
449 jne htg_x2_notherm
450 or edi, 0x08
451 or eax, APIC_REG_LVT_MASKED
452 wrmsr
453htg_x2_notherm:
454 mov edx, esi ; Restore edx.
455 mov [edx + CPUMCPU.fApicDisVectors], edi
456
457htg_apic_done:
458%endif
459
460 ; control registers.
461 mov eax, cr0
462 mov [edx + CPUMCPU.Host.cr0], eax
463;Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
464 mov eax, cr3
465 mov [edx + CPUMCPU.Host.cr3], eax
466 mov esi, cr4 ; esi = cr4, we'll modify it further down.
467 mov [edx + CPUMCPU.Host.cr4], esi
468
469 DEBUG32_S_CHAR('c')
470 DEBUG32_S_CHAR(';')
471
472 ; save the host EFER msr
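 ; (rdmsr/wrmsr take the MSR index in ecx and the value in edx:eax, so the
 ; CPUMCPU pointer is parked in ebx across the access.)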
473 mov ebx, edx
474 mov ecx, MSR_K6_EFER
475 rdmsr
476 mov [ebx + CPUMCPU.Host.efer], eax
477 mov [ebx + CPUMCPU.Host.efer + 4], edx
478 mov edx, ebx
479 DEBUG32_S_CHAR('e')
480 DEBUG32_S_CHAR(';')
481
482%ifdef VBOX_WITH_CRASHDUMP_MAGIC
483 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
484%endif
485
486 ; Load the new GDT so we can do a far jump after going into 64-bit mode
487 ;DEBUG_CMOS_STACK32 16h
488 lgdt [edx + CPUMCPU.Hyper.gdtr]
489
490 DEBUG32_S_CHAR('g')
491 DEBUG32_S_CHAR('!')
492%ifdef VBOX_WITH_CRASHDUMP_MAGIC
493 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
494%endif
495
496 ;;
497 ;; Clean up CR4. X86_CR4_PGE, X86_CR4_PCE, X86_CR4_PCIDE (not really
498 ;; relevant for 32-bit, but whatever) and X86_CR4_VMXE must be cleared.
499 ;;
500 and esi, X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE \
501 | X86_CR4_MCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT | X86_CR4_SMXE | X86_CR4_OSXSAVE
502 mov cr4, esi
503
504 ;;
505 ;; Load Intermediate memory context.
506 ;;
507 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
508 mov eax, 0ffffffffh
509 mov cr3, eax
510 DEBUG32_CHAR('?')
511%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
512 DEBUG_CMOS_TRASH_AL 17h
513%endif
514
515 ;;
516 ;; Jump to identity mapped location
517 ;;
518 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
519 jmp near NAME(IDEnterTarget)
520
521
522 ; We're now on identity mapped pages!
523ALIGNCODE(16)
524GLOBALNAME IDEnterTarget
525 DEBUG32_CHAR('1')
526 DEBUG_CMOS_TRASH_AL 19h
527
528 ; 1. Disable paging.
529 mov ebx, cr0
530 and ebx, ~X86_CR0_PG
531 mov cr0, ebx
532 DEBUG32_CHAR('2')
533 DEBUG_CMOS_TRASH_AL 1ah
534
535%ifdef VBOX_WITH_CRASHDUMP_MAGIC
536 mov eax, cr2
537 mov dword [eax], 3
538%endif
539
540 ; 2. Enable PAE.
541 mov ecx, cr4
542 or ecx, X86_CR4_PAE
543 mov cr4, ecx
544 DEBUG_CMOS_TRASH_AL 1bh
545
546 ; 3. Load long mode intermediate CR3.
547 FIXUP FIX_INTER_AMD64_CR3, 1
548 mov ecx, 0ffffffffh
549 mov cr3, ecx
550 DEBUG32_CHAR('3')
551 DEBUG_CMOS_TRASH_AL 1ch
552
553%ifdef VBOX_WITH_CRASHDUMP_MAGIC
554 mov eax, cr2
555 mov dword [eax], 4
556%endif
557
558 ; 4. Enable long mode.
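 ; (The FIX_EFER_OR_MASK fixup below is resolved at load time and supplies the
 ; EFER bits to set - presumably at least MSR_K6_EFER_LME.)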
559 mov esi, edx
560 mov ecx, MSR_K6_EFER
561 rdmsr
562 FIXUP FIX_EFER_OR_MASK, 1
563 or eax, 0ffffffffh
564 and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
565 wrmsr
566 mov edx, esi
567 DEBUG32_CHAR('4')
568 DEBUG_CMOS_TRASH_AL 1dh
569
570%ifdef VBOX_WITH_CRASHDUMP_MAGIC
571 mov eax, cr2
572 mov dword [eax], 5
573%endif
574
575 ; 5. Enable paging.
576 or ebx, X86_CR0_PG
577 ; Disable ring 0 write protection too
578 and ebx, ~X86_CR0_WRITE_PROTECT
579 mov cr0, ebx
580 DEBUG32_CHAR('5')
581
582 ; Jump from compatibility mode to 64-bit mode.
583 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
584 jmp 0ffffh:0fffffffeh
585
586 ;
587 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
588BITS 64
589ALIGNCODE(16)
590NAME(IDEnter64Mode):
591 DEBUG64_CHAR('6')
592 DEBUG_CMOS_TRASH_AL 1eh
593 jmp [NAME(pICEnterTarget) wrt rip]
594
595; 64-bit jump target
596NAME(pICEnterTarget):
597FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
598dq 0ffffffffffffffffh
599
600; 64-bit pCpum address.
601NAME(pCpumIC):
602FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
603dq 0ffffffffffffffffh
604
605%ifdef VBOX_WITH_CRASHDUMP_MAGIC
606NAME(pMarker):
607db 'Switch_marker'
608%endif
609
610 ;
611 ; When we arrive here we're in 64-bit mode in the intermediate context
612 ;
613ALIGNCODE(16)
614GLOBALNAME ICEnterTarget
615 ;DEBUG_CMOS_TRASH_AL 1fh
616 ; Load CPUM pointer into rdx
617 mov rdx, [NAME(pCpumIC) wrt rip]
618 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
619
620 mov rax, cs
621 mov ds, rax
622 mov es, rax
623
624 ; Invalidate fs & gs
625 mov rax, 0
626 mov fs, rax
627 mov gs, rax
628
629%ifdef VBOX_WITH_CRASHDUMP_MAGIC
630 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
631%endif
632
633 ; Setup stack.
634 DEBUG64_CHAR('7')
635 mov rsp, 0
636 mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
637 mov ss, ax
638 mov esp, [rdx + CPUMCPU.Hyper.esp]
639
640%ifdef VBOX_WITH_CRASHDUMP_MAGIC
641 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
642%endif
643
644%ifdef VBOX_WITH_64ON32_IDT
645 ; Set up emergency trap handlers.
646 lidt [rdx + CPUMCPU.Hyper.idtr]
647%endif
648
649 ; load the hypervisor function address
650 mov r9, [rdx + CPUMCPU.Hyper.eip]
651 DEBUG64_S_CHAR('8')
652
653 ; Check if we need to restore the guest FPU state
654 mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
655 test esi, CPUM_SYNC_FPU_STATE
656 jz near htg_fpu_no
657
658%ifdef VBOX_WITH_CRASHDUMP_MAGIC
659 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
660%endif
661
662 mov rax, cr0
663 mov rcx, rax ; save old CR0
664 and rax, ~(X86_CR0_TS | X86_CR0_EM)
665 mov cr0, rax
666 mov eax, [rdx + CPUMCPU.Guest.pXStateRC]
667 o64 fxrstor [rax] ; (use explicit REX prefix, see @bugref{6398})
668 mov cr0, rcx ; and restore old CR0 again
669
670 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
671
672htg_fpu_no:
673 ; Check if we need to restore the guest debug state
674 test esi, CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER
675 jz htg_debug_done
676
677%ifdef VBOX_WITH_CRASHDUMP_MAGIC
678 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
679%endif
680 test esi, CPUM_SYNC_DEBUG_REGS_HYPER
681 jnz htg_debug_hyper
682
683 ; Guest values in DRx, letting the guest access them directly.
684 mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
685 mov dr0, rax
686 mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
687 mov dr1, rax
688 mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
689 mov dr2, rax
690 mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
691 mov dr3, rax
692 mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
693 mov dr6, rax ; not required for AMD-V
694
695 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_GUEST
696 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_GUEST
697 jmp htg_debug_done
698
699htg_debug_hyper:
700 ; Combined values in DRx, intercepting all accesses.
701 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 0*8]
702 mov dr0, rax
703 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 1*8]
704 mov dr1, rax
705 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 2*8]
706 mov dr2, rax
707 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 3*8]
708 mov dr3, rax
709 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 6*8]
710 mov dr6, rax ; not required for AMD-V
711
712 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_HYPER
713 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
714
715htg_debug_done:
716
717%ifdef VBOX_WITH_CRASHDUMP_MAGIC
718 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
719%endif
720
721 ;
722 ; "Call" the specified helper function.
723 ;
724
725 ; parameter for all helper functions (pCtx)
726 DEBUG64_CHAR('9')
727 lea rsi, [rdx + CPUMCPU.Guest]
728 lea rax, [htg_return wrt rip]
729 push rax ; return address
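 ; (The helpers below all end with 'ret', so pushing htg_return and jumping
 ; gives every one of them the same return point.)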
730
731 cmp r9d, HM64ON32OP_VMXRCStartVM64
732 jz NAME(VMXRCStartVM64)
733 cmp r9d, HM64ON32OP_SVMRCVMRun64
734 jz NAME(SVMRCVMRun64)
735 cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
736 jz NAME(HMRCSaveGuestFPU64)
737 cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
738 jz NAME(HMRCSaveGuestDebug64)
739 cmp r9d, HM64ON32OP_HMRCTestSwitcher64
740 jz NAME(HMRCTestSwitcher64)
741 mov eax, VERR_HM_INVALID_HM64ON32OP
742htg_return:
743 DEBUG64_CHAR('r')
744
745 ; Load CPUM pointer into rdx
746 mov rdx, [NAME(pCpumIC) wrt rip]
747 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
748
749%ifdef VBOX_WITH_CRASHDUMP_MAGIC
750 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
751%endif
752
753 ; Save the return code
754 mov dword [rdx + CPUMCPU.u32RetCode], eax
755
756 ; now let's switch back
757 jmp NAME(vmmRCToHostAsm) ; rax = returncode.
758
759ENDPROC vmmR0ToRawModeAsm
760
761
762
763
764;
765;
766; HM code (used to be HMRCA.asm at one point).
767; HM code (used to be HMRCA.asm at one point).
768; HM code (used to be HMRCA.asm at one point).
769;
770;
771
772;; @def MYPUSHSEGS
773; Macro saving the ds and es segment registers on the stack.
774; @param 1 full width register name
775%macro MYPUSHSEGS 1
776 mov %1, es
777 push %1
778 mov %1, ds
779 push %1
780%endmacro
781
782;; @def MYPOPSEGS
783; Macro restoring the ds and es segment registers from the stack
784; @param 1 full width register name
785%macro MYPOPSEGS 1
786 pop %1
787 mov ds, %1
788 pop %1
789 mov es, %1
790%endmacro
791
792
793;/**
794; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
795; *
796; * @returns VBox status code
797; * @param HCPhysCpuPage VMXON physical address [rsp+8]
798; * @param HCPhysVmcs VMCS physical address [rsp+16]
799; * @param pCache VMCS cache [rsp+24]
800; * @param pCtx Guest context (rsi)
801; */
802BEGINPROC VMXRCStartVM64
803 push rbp
804 mov rbp, rsp
805 DEBUG_CMOS_STACK64 20h
806
807 ; Make sure VT-x instructions are allowed.
808 mov rax, cr4
809 or rax, X86_CR4_VMXE
810 mov cr4, rax
811
812 ; Enter VMX Root Mode.
813 vmxon [rbp + 8 + 8]
814 jnc .vmxon_success
815 mov rax, VERR_VMX_INVALID_VMXON_PTR
816 jmp .vmstart64_vmxon_failed
817
818.vmxon_success:
819 jnz .vmxon_success2
820 mov rax, VERR_VMX_VMXON_FAILED
821 jmp .vmstart64_vmxon_failed
822
823.vmxon_success2:
824 ; Activate the VMCS pointer
825 vmptrld [rbp + 16 + 8]
826 jnc .vmptrld_success
827 mov rax, VERR_VMX_INVALID_VMCS_PTR
828 jmp .vmstart64_vmxoff_end
829
830.vmptrld_success:
831 jnz .vmptrld_success2
832 mov rax, VERR_VMX_VMPTRLD_FAILED
833 jmp .vmstart64_vmxoff_end
834
835.vmptrld_success2:
836
837 ; Save the VMCS pointer on the stack
838 push qword [rbp + 16 + 8];
839
840 ; Save segment registers.
841 MYPUSHSEGS rax
842
843%ifdef VMX_USE_CACHED_VMCS_ACCESSES
844 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
845 mov rbx, [rbp + 24 + 8] ; pCache
846
847 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
848 mov qword [rbx + VMCSCACHE.uPos], 2
849 %endif
850
851 %ifdef DEBUG
852 mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
853 mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
854 mov rax, [rbp + 16 + 8] ; HCPhysVmcs
855 mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
856 mov [rbx + VMCSCACHE.TestIn.pCache], rbx
857 mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
858 %endif
859
860 mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
861 cmp ecx, 0
862 je .no_cached_writes
863 mov rdx, rcx
864 mov rcx, 0
865 jmp .cached_write
866
867ALIGN(16)
868.cached_write:
869 mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
870 vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
871 inc rcx
872 cmp rcx, rdx
873 jl .cached_write
874
875 mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
876.no_cached_writes:
877
878 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
879 mov qword [rbx + VMCSCACHE.uPos], 3
880 %endif
881 ; Save the pCache pointer.
882 push rbx
883%endif
884
885 ; Save the host state that's relevant in the temporary 64-bit mode.
886 mov rdx, cr0
887 mov eax, VMX_VMCS_HOST_CR0
888 vmwrite rax, rdx
889
890 mov rdx, cr3
891 mov eax, VMX_VMCS_HOST_CR3
892 vmwrite rax, rdx
893
894 mov rdx, cr4
895 mov eax, VMX_VMCS_HOST_CR4
896 vmwrite rax, rdx
897
898 mov rdx, cs
899 mov eax, VMX_VMCS_HOST_FIELD_CS
900 vmwrite rax, rdx
901
902 mov rdx, ss
903 mov eax, VMX_VMCS_HOST_FIELD_SS
904 vmwrite rax, rdx
905
906%if 0 ; Another experiment regarding triple faults... Seems not to be necessary.
907 sub rsp, 16
908 str [rsp]
909 movsx rdx, word [rsp]
910 mov eax, VMX_VMCS_HOST_FIELD_TR
911 vmwrite rax, rdx
912 add rsp, 16
913%endif
914
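 ; (sgdt stores a 2-byte limit followed by the 8-byte base; writing it at
 ; rsp + 6 keeps the base naturally aligned at rsp + 8, which is what the
 ; vmwrite below reads via [rsp + 6 + 2].)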
915 sub rsp, 16
916 sgdt [rsp + 6] ; (The 64-bit base should be aligned, not the word.)
917 mov eax, VMX_VMCS_HOST_GDTR_BASE
918 vmwrite rax, [rsp + 6 + 2]
919 add rsp, 16
920
921%ifdef VBOX_WITH_64ON32_IDT
922 sub rsp, 16
923 sidt [rsp + 6]
924 mov eax, VMX_VMCS_HOST_IDTR_BASE
925 vmwrite rax, [rsp + 6 + 2] ; [rsi + CPUMCPU.Hyper.idtr + 2] - why doesn't this work?
926 add rsp, 16
927 ;call NAME(vmm64On32PrintIdtr)
928%endif
929
930%ifdef VBOX_WITH_CRASHDUMP_MAGIC
931 mov qword [rbx + VMCSCACHE.uPos], 4
932%endif
933
934 ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).
935
936 ; First we have to save some final CPU context registers.
937 lea rdx, [.vmlaunch64_done wrt rip]
938 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
939 vmwrite rax, rdx
940 ; Note: assumes success!
941
942 ; Manual save and restore:
943 ; - General purpose registers except RIP, RSP
944 ;
945 ; Trashed:
946 ; - CR2 (we don't care)
947 ; - LDTR (reset to 0)
948 ; - DRx (presumably not changed at all)
949 ; - DR7 (reset to 0x400)
950 ; - EFLAGS (reset to RT_BIT(1); not relevant)
951
952%ifdef VBOX_WITH_CRASHDUMP_MAGIC
953 mov qword [rbx + VMCSCACHE.uPos], 5
954%endif
955
956 ; Save the pCtx pointer
957 push rsi
958
959 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
960 mov rbx, qword [rsi + CPUMCTX.cr2]
961 mov rdx, cr2
962 cmp rdx, rbx
963 je .skipcr2write64
964 mov cr2, rbx
965
966.skipcr2write64:
967 mov eax, VMX_VMCS_HOST_RSP
968 vmwrite rax, rsp
969 ; Note: assumes success!
970 ; Don't mess with ESP anymore!!!
971
972 ; Restore the guest's general purpose registers.
973 mov rax, qword [rsi + CPUMCTX.eax]
974 mov rbx, qword [rsi + CPUMCTX.ebx]
975 mov rcx, qword [rsi + CPUMCTX.ecx]
976 mov rdx, qword [rsi + CPUMCTX.edx]
977 mov rbp, qword [rsi + CPUMCTX.ebp]
978 mov r8, qword [rsi + CPUMCTX.r8]
979 mov r9, qword [rsi + CPUMCTX.r9]
980 mov r10, qword [rsi + CPUMCTX.r10]
981 mov r11, qword [rsi + CPUMCTX.r11]
982 mov r12, qword [rsi + CPUMCTX.r12]
983 mov r13, qword [rsi + CPUMCTX.r13]
984 mov r14, qword [rsi + CPUMCTX.r14]
985 mov r15, qword [rsi + CPUMCTX.r15]
986
987 ; Restore rdi & rsi last; rsi held the pCtx pointer until now.
988 mov rdi, qword [rsi + CPUMCTX.edi]
989 mov rsi, qword [rsi + CPUMCTX.esi]
990
991 vmlaunch
992 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
993
994ALIGNCODE(16)
995.vmlaunch64_done:
996%if 0 ;fixme later - def VBOX_WITH_64ON32_IDT
997 push rdx
998 mov rdx, [rsp + 8] ; pCtx
999 lidt [rdx + CPUMCPU.Hyper.idtr]
1000 pop rdx
1001%endif
1002 jc near .vmstart64_invalid_vmcs_ptr
1003 jz near .vmstart64_start_failed
1004
1005 push rdi
1006 mov rdi, [rsp + 8] ; pCtx
1007
1008 mov qword [rdi + CPUMCTX.eax], rax
1009 mov qword [rdi + CPUMCTX.ebx], rbx
1010 mov qword [rdi + CPUMCTX.ecx], rcx
1011 mov qword [rdi + CPUMCTX.edx], rdx
1012 mov qword [rdi + CPUMCTX.esi], rsi
1013 mov qword [rdi + CPUMCTX.ebp], rbp
1014 mov qword [rdi + CPUMCTX.r8], r8
1015 mov qword [rdi + CPUMCTX.r9], r9
1016 mov qword [rdi + CPUMCTX.r10], r10
1017 mov qword [rdi + CPUMCTX.r11], r11
1018 mov qword [rdi + CPUMCTX.r12], r12
1019 mov qword [rdi + CPUMCTX.r13], r13
1020 mov qword [rdi + CPUMCTX.r14], r14
1021 mov qword [rdi + CPUMCTX.r15], r15
1022 mov rax, cr2
1023 mov qword [rdi + CPUMCTX.cr2], rax
1024
1025 pop rax ; The guest edi we pushed above
1026 mov qword [rdi + CPUMCTX.edi], rax
1027
1028 pop rsi ; pCtx (needed in rsi by the macros below)
1029
1030%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1031 pop rdi ; Saved pCache
1032
1033 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1034 mov dword [rdi + VMCSCACHE.uPos], 7
1035 %endif
1036 %ifdef DEBUG
1037 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1038 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1039 mov rax, cr8
1040 mov [rdi + VMCSCACHE.TestOut.cr8], rax
1041 %endif
1042
1043 mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
1044 cmp ecx, 0 ; Can't happen
1045 je .no_cached_reads
1046 jmp .cached_read
1047
1048ALIGN(16)
1049.cached_read:
1050 dec rcx
1051 mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
1052 vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
1053 cmp rcx, 0
1054 jnz .cached_read
1055.no_cached_reads:
1056 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1057 mov dword [rdi + VMCSCACHE.uPos], 8
1058 %endif
1059%endif
1060
1061 ; Restore segment registers.
1062 MYPOPSEGS rax
1063
1064 mov eax, VINF_SUCCESS
1065
1066%ifdef VBOX_WITH_CRASHDUMP_MAGIC
1067 mov dword [rdi + VMCSCACHE.uPos], 9
1068%endif
1069.vmstart64_end:
1070
1071%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1072 %ifdef DEBUG
1073 mov rdx, [rsp] ; HCPhysVmcs
1074 mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
1075 %endif
1076%endif
1077
1078 ; Write back the data and disable the VMCS.
1079 vmclear qword [rsp] ; Pushed pVMCS
1080 add rsp, 8
1081
1082.vmstart64_vmxoff_end:
1083 ; Disable VMX root mode.
1084 vmxoff
1085.vmstart64_vmxon_failed:
1086%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1087 %ifdef DEBUG
1088 cmp eax, VINF_SUCCESS
1089 jne .skip_flags_save
1090
1091 pushf
1092 pop rdx
1093 mov [rdi + VMCSCACHE.TestOut.eflags], rdx
1094 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1095 mov dword [rdi + VMCSCACHE.uPos], 12
1096 %endif
1097.skip_flags_save:
1098 %endif
1099%endif
1100 pop rbp
1101 ret
1102
1103
1104.vmstart64_invalid_vmcs_ptr:
1105 pop rsi ; pCtx (needed in rsi by the macros below)
1106
1107%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1108 pop rdi ; pCache
1109 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1110 mov dword [rdi + VMCSCACHE.uPos], 10
1111 %endif
1112
1113 %ifdef DEBUG
1114 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1115 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1116 %endif
1117%endif
1118
1119 ; Restore segment registers.
1120 MYPOPSEGS rax
1121
1122 ; Restore all general purpose host registers.
1123 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1124 jmp .vmstart64_end
1125
1126.vmstart64_start_failed:
1127 pop rsi ; pCtx (needed in rsi by the macros below)
1128
1129%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1130 pop rdi ; pCache
1131
1132 %ifdef DEBUG
1133 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1134 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1135 %endif
1136 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1137 mov dword [rdi + VMCSCACHE.uPos], 11
1138 %endif
1139%endif
1140
1141 ; Restore segment registers.
1142 MYPOPSEGS rax
1143
1144 ; Restore all general purpose host registers.
1145 mov eax, VERR_VMX_UNABLE_TO_START_VM
1146 jmp .vmstart64_end
1147ENDPROC VMXRCStartVM64
1148
1149
1150;/**
1151; * Prepares for and executes VMRUN (64-bit guests)
1152; *
1153; * @returns VBox status code
1154; * @param pVMCBHostPhys Physical address of the host VMCB (rsp+8)
1155; * @param pVMCBPhys Physical address of the guest VMCB (rsp+16)
1156; * @param pCtx Guest context (rsi)
1157; */
1158BEGINPROC SVMRCVMRun64
1159 push rbp
1160 mov rbp, rsp
1161 pushf
1162 DEBUG_CMOS_STACK64 30h
1163
1164 ; Manual save and restore:
1165 ; - General purpose registers except RIP, RSP, RAX
1166 ;
1167 ; Trashed:
1168 ; - CR2 (we don't care)
1169 ; - LDTR (reset to 0)
1170 ; - DRx (presumably not changed at all)
1171 ; - DR7 (reset to 0x400)
1172
1173 ; Save the Guest CPU context pointer.
1174 push rsi ; Push for saving the state at the end
1175
1176 ; Save host fs, gs, sysenter msr etc
1177 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64-bit physical address)
1178 push rax ; Save for the vmload after vmrun
1179 vmsave
1180
1181 ; Setup eax for VMLOAD
1182 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address)
1183
1184 ; Restore Guest's general purpose registers.
1185 ; rax is loaded from the VMCB by VMRUN.
1186 mov rbx, qword [rsi + CPUMCTX.ebx]
1187 mov rcx, qword [rsi + CPUMCTX.ecx]
1188 mov rdx, qword [rsi + CPUMCTX.edx]
1189 mov rdi, qword [rsi + CPUMCTX.edi]
1190 mov rbp, qword [rsi + CPUMCTX.ebp]
1191 mov r8, qword [rsi + CPUMCTX.r8]
1192 mov r9, qword [rsi + CPUMCTX.r9]
1193 mov r10, qword [rsi + CPUMCTX.r10]
1194 mov r11, qword [rsi + CPUMCTX.r11]
1195 mov r12, qword [rsi + CPUMCTX.r12]
1196 mov r13, qword [rsi + CPUMCTX.r13]
1197 mov r14, qword [rsi + CPUMCTX.r14]
1198 mov r15, qword [rsi + CPUMCTX.r15]
1199 mov rsi, qword [rsi + CPUMCTX.esi]
1200
1201 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1202 clgi
1203 sti
1204
1205 ; Load guest fs, gs, sysenter msr etc
1206 vmload
1207 ; Run the VM
1208 vmrun
1209
1210 ; rax is in the VMCB already; we can use it here.
1211
1212 ; Save guest fs, gs, sysenter msr etc.
1213 vmsave
1214
1215 ; Load host fs, gs, sysenter msr etc.
1216 pop rax ; Pushed above
1217 vmload
1218
1219 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1220 cli
1221 stgi
1222
1223 pop rax ; pCtx
1224
1225 mov qword [rax + CPUMCTX.ebx], rbx
1226 mov qword [rax + CPUMCTX.ecx], rcx
1227 mov qword [rax + CPUMCTX.edx], rdx
1228 mov qword [rax + CPUMCTX.esi], rsi
1229 mov qword [rax + CPUMCTX.edi], rdi
1230 mov qword [rax + CPUMCTX.ebp], rbp
1231 mov qword [rax + CPUMCTX.r8], r8
1232 mov qword [rax + CPUMCTX.r9], r9
1233 mov qword [rax + CPUMCTX.r10], r10
1234 mov qword [rax + CPUMCTX.r11], r11
1235 mov qword [rax + CPUMCTX.r12], r12
1236 mov qword [rax + CPUMCTX.r13], r13
1237 mov qword [rax + CPUMCTX.r14], r14
1238 mov qword [rax + CPUMCTX.r15], r15
1239
1240 mov eax, VINF_SUCCESS
1241
1242 popf
1243 pop rbp
1244 ret
1245ENDPROC SVMRCVMRun64
1246
1247;/**
1248; * Saves the guest FPU context
1249; *
1250; * @returns VBox status code
1251; * @param pCtx Guest context [rsi]
1252; */
1253BEGINPROC HMRCSaveGuestFPU64
1254 DEBUG_CMOS_STACK64 40h
1255 mov rax, cr0
1256 mov rcx, rax ; save old CR0
1257 and rax, ~(X86_CR0_TS | X86_CR0_EM)
1258 mov cr0, rax
1259
1260 mov eax, [rsi + CPUMCTX.pXStateRC]
1261 o64 fxsave [rax] ; (use explicit REX prefix, see @bugref{6398})
1262
1263 mov cr0, rcx ; and restore old CR0 again
1264
1265 mov eax, VINF_SUCCESS
1266 ret
1267ENDPROC HMRCSaveGuestFPU64
1268
1269;/**
1270; * Saves the guest debug context (DR0-3, DR6)
1271; *
1272; * @returns VBox status code
1273; * @param pCtx Guest context [rsi]
1274; */
1275BEGINPROC HMRCSaveGuestDebug64
1276 DEBUG_CMOS_STACK64 41h
1277 mov rax, dr0
1278 mov qword [rsi + CPUMCTX.dr + 0*8], rax
1279 mov rax, dr1
1280 mov qword [rsi + CPUMCTX.dr + 1*8], rax
1281 mov rax, dr2
1282 mov qword [rsi + CPUMCTX.dr + 2*8], rax
1283 mov rax, dr3
1284 mov qword [rsi + CPUMCTX.dr + 3*8], rax
1285 mov rax, dr6
1286 mov qword [rsi + CPUMCTX.dr + 6*8], rax
1287 mov eax, VINF_SUCCESS
1288 ret
1289ENDPROC HMRCSaveGuestDebug64
1290
1291;/**
1292; * Dummy callback handler
1293; *
1294; * @returns VBox status code
1295; * @param param1 Parameter 1 [rsp+8]
1296; * @param param2 Parameter 2 [rsp+12]
1297; * @param param3 Parameter 3 [rsp+16]
1298; * @param param4 Parameter 4 [rsp+20]
1299; * @param param5 Parameter 5 [rsp+24]
1300; * @param pCtx Guest context [rsi]
1301; */
1302BEGINPROC HMRCTestSwitcher64
1303 DEBUG_CMOS_STACK64 42h
1304 mov eax, [rsp+8]
1305 ret
1306ENDPROC HMRCTestSwitcher64
1307
1308
1309%ifdef VBOX_WITH_64ON32_IDT
1310;
1311; Trap handling.
1312;
1313
1314;; Here follows an array of trap handler entry points, each 8 bytes in size.
1315BEGINPROC vmm64On32TrapHandlers
1316%macro vmm64On32TrapEntry 1
1317GLOBALNAME vmm64On32Trap %+ i
1318 db 06ah, i ; push imm8 - note that this is a sign-extended value.
1319 jmp NAME(%1)
1320 ALIGNCODE(8)
1321%assign i i+1
1322%endmacro
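;; (So each entry is a two-byte 'push imm8' carrying the vector number plus a
;; jump to the shared handler, padded out to 8 bytes by the ALIGNCODE(8) above.)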
1323%assign i 0 ; start counter.
1324 vmm64On32TrapEntry vmm64On32Trap ; 0
1325 vmm64On32TrapEntry vmm64On32Trap ; 1
1326 vmm64On32TrapEntry vmm64On32Trap ; 2
1327 vmm64On32TrapEntry vmm64On32Trap ; 3
1328 vmm64On32TrapEntry vmm64On32Trap ; 4
1329 vmm64On32TrapEntry vmm64On32Trap ; 5
1330 vmm64On32TrapEntry vmm64On32Trap ; 6
1331 vmm64On32TrapEntry vmm64On32Trap ; 7
1332 vmm64On32TrapEntry vmm64On32TrapErrCode ; 8
1333 vmm64On32TrapEntry vmm64On32Trap ; 9
1334 vmm64On32TrapEntry vmm64On32TrapErrCode ; a
1335 vmm64On32TrapEntry vmm64On32TrapErrCode ; b
1336 vmm64On32TrapEntry vmm64On32TrapErrCode ; c
1337 vmm64On32TrapEntry vmm64On32TrapErrCode ; d
1338 vmm64On32TrapEntry vmm64On32TrapErrCode ; e
1339 vmm64On32TrapEntry vmm64On32Trap ; f (reserved)
1340 vmm64On32TrapEntry vmm64On32Trap ; 10
1341 vmm64On32TrapEntry vmm64On32TrapErrCode ; 11
1342 vmm64On32TrapEntry vmm64On32Trap ; 12
1343 vmm64On32TrapEntry vmm64On32Trap ; 13
1344%rep (0x100 - 0x14)
1345 vmm64On32TrapEntry vmm64On32Trap
1346%endrep
1347ENDPROC vmm64On32TrapHandlers
1348
1349;; Fake an error code and jump to the real thing.
1350BEGINPROC vmm64On32Trap
1351 push qword [rsp]
1352 jmp NAME(vmm64On32TrapErrCode)
1353ENDPROC vmm64On32Trap
1354
1355
1356;;
1357; Trap frame:
1358; [rbp + 38h] = ss
1359; [rbp + 30h] = rsp
1360; [rbp + 28h] = eflags
1361; [rbp + 20h] = cs
1362; [rbp + 18h] = rip
1363; [rbp + 10h] = error code (or trap number)
1364; [rbp + 08h] = trap number
1365; [rbp + 00h] = rbp
1366; [rbp - 08h] = rax
1367; [rbp - 10h] = rbx
1368; [rbp - 18h] = ds
1369;
1370BEGINPROC vmm64On32TrapErrCode
1371 push rbp
1372 mov rbp, rsp
1373 push rax
1374 push rbx
1375 mov ax, ds
1376 push rax
1377 sub rsp, 20h
1378
1379 mov ax, cs
1380 mov ds, ax
1381
1382%if 1
1383 COM64_S_NEWLINE
1384 COM64_S_CHAR '!'
1385 COM64_S_CHAR 't'
1386 COM64_S_CHAR 'r'
1387 COM64_S_CHAR 'a'
1388 COM64_S_CHAR 'p'
1389 movzx eax, byte [rbp + 08h]
1390 COM64_S_DWORD_REG eax
1391 COM64_S_CHAR '!'
1392%endif
1393
1394%if 0 ;; @todo Figure the offset of the CPUMCPU relative to CPUM
1395 sidt [rsp]
1396 movsx eax, word [rsp]
1397 shr eax, 12 ; div by 16 * 256 (0x1000).
1398%else
1399 ; hardcoded VCPU(0) for now...
1400 mov rbx, [NAME(pCpumIC) wrt rip]
1401 mov eax, [rbx + CPUM.offCPUMCPU0]
1402%endif
1403 push rax ; Save the offset for rbp later.
1404
1405 add rbx, rax ; rbx = CPUMCPU
1406
1407 ;
1408 ; Deal with recursive traps due to vmxoff (lazy bird).
1409 ;
1410 lea rax, [.vmxoff_trap_location wrt rip]
1411 cmp rax, [rbp + 18h]
1412 je .not_vmx_root
1413
1414 ;
1415 ; Save the context.
1416 ;
1417 mov rax, [rbp - 8]
1418 mov [rbx + CPUMCPU.Hyper.eax], rax
1419 mov [rbx + CPUMCPU.Hyper.ecx], rcx
1420 mov [rbx + CPUMCPU.Hyper.edx], rdx
1421 mov rax, [rbp - 10h]
1422 mov [rbx + CPUMCPU.Hyper.ebx], rax
1423 mov rax, [rbp]
1424 mov [rbx + CPUMCPU.Hyper.ebp], rax
1425 mov rax, [rbp + 30h]
1426 mov [rbx + CPUMCPU.Hyper.esp], rax
1427 mov [rbx + CPUMCPU.Hyper.edi], rdi
1428 mov [rbx + CPUMCPU.Hyper.esi], rsi
1429 mov [rbx + CPUMCPU.Hyper.r8], r8
1430 mov [rbx + CPUMCPU.Hyper.r9], r9
1431 mov [rbx + CPUMCPU.Hyper.r10], r10
1432 mov [rbx + CPUMCPU.Hyper.r11], r11
1433 mov [rbx + CPUMCPU.Hyper.r12], r12
1434 mov [rbx + CPUMCPU.Hyper.r13], r13
1435 mov [rbx + CPUMCPU.Hyper.r14], r14
1436 mov [rbx + CPUMCPU.Hyper.r15], r15
1437
1438 mov rax, [rbp + 18h]
1439 mov [rbx + CPUMCPU.Hyper.eip], rax
1440 movzx ax, [rbp + 20h]
1441 mov [rbx + CPUMCPU.Hyper.cs.Sel], ax
1442 mov ax, [rbp + 38h]
1443 mov [rbx + CPUMCPU.Hyper.ss.Sel], ax
1444 mov ax, [rbp - 18h]
1445 mov [rbx + CPUMCPU.Hyper.ds.Sel], ax
1446
1447 mov rax, [rbp + 28h]
1448 mov [rbx + CPUMCPU.Hyper.eflags], rax
1449
1450 mov rax, cr2
1451 mov [rbx + CPUMCPU.Hyper.cr2], rax
1452
1453 mov rax, [rbp + 10h]
1454 mov [rbx + CPUMCPU.Hyper.r14], rax ; r14 = error code
1455 movzx eax, byte [rbp + 08h]
1456 mov [rbx + CPUMCPU.Hyper.r15], rax ; r15 = trap number
1457
1458 ;
1459 ; Finally, leave VMX root operation before trying to return to the host.
1460 ;
1461 mov rax, cr4
1462 test rax, X86_CR4_VMXE
1463 jz .not_vmx_root
1464.vmxoff_trap_location:
1465 vmxoff
1466.not_vmx_root:
1467
1468 ;
1469 ; Go back to the host.
1470 ;
1471 pop rbp
1472 mov dword [rbx + CPUMCPU.u32RetCode], VERR_TRPM_DONT_PANIC
1473 jmp NAME(vmmRCToHostAsm)
1474ENDPROC vmm64On32TrapErrCode
1475
1476;; We allocate the IDT here to avoid having to allocate memory separately somewhere.
1477ALIGNCODE(16)
1478GLOBALNAME vmm64On32Idt
1479%assign i 0
1480%rep 256
1481 dq NAME(vmm64On32Trap %+ i) - NAME(Start) ; Relative trap handler offsets.
1482 dq 0
1483%assign i (i + 1)
1484%endrep
1485
1486
1487 %if 0
1488;; For debugging purposes.
1489BEGINPROC vmm64On32PrintIdtr
1490 push rax
1491 push rsi ; paranoia
1492 push rdi ; ditto
1493 sub rsp, 16
1494
1495 COM64_S_CHAR ';'
1496 COM64_S_CHAR 'i'
1497 COM64_S_CHAR 'd'
1498 COM64_S_CHAR 't'
1499 COM64_S_CHAR 'r'
1500 COM64_S_CHAR '='
1501 sidt [rsp + 6]
1502 mov eax, [rsp + 8 + 4]
1503 COM64_S_DWORD_REG eax
1504 mov eax, [rsp + 8]
1505 COM64_S_DWORD_REG eax
1506 COM64_S_CHAR ':'
1507 movzx eax, word [rsp + 6]
1508 COM64_S_DWORD_REG eax
1509 COM64_S_CHAR '!'
1510
1511 add rsp, 16
1512 pop rdi
1513 pop rsi
1514 pop rax
1515 ret
1516ENDPROC vmm64On32PrintIdtr
1517 %endif
1518
1519 %if 1
1520;; For debugging purposes.
1521BEGINPROC vmm64On32DumpCmos
1522 push rax
1523 push rdx
1524 push rcx
1525 push rsi ; paranoia
1526 push rdi ; ditto
1527 sub rsp, 16
1528
1529%if 0
1530 mov al, 3
1531 out 72h, al
1532 mov al, 68h
1533 out 73h, al
1534%endif
1535
1536 COM64_S_NEWLINE
1537 COM64_S_CHAR 'c'
1538 COM64_S_CHAR 'm'
1539 COM64_S_CHAR 'o'
1540 COM64_S_CHAR 's'
1541 COM64_S_CHAR '0'
1542 COM64_S_CHAR ':'
1543
1544 xor ecx, ecx
1545.loop1:
1546 mov al, cl
1547 out 70h, al
1548 in al, 71h
1549 COM64_S_BYTE_REG eax
1550 COM64_S_CHAR ' '
1551 inc ecx
1552 cmp ecx, 128
1553 jb .loop1
1554
1555 COM64_S_NEWLINE
1556 COM64_S_CHAR 'c'
1557 COM64_S_CHAR 'm'
1558 COM64_S_CHAR 'o'
1559 COM64_S_CHAR 's'
1560 COM64_S_CHAR '1'
1561 COM64_S_CHAR ':'
1562 xor ecx, ecx
1563.loop2:
1564 mov al, cl
1565 out 72h, al
1566 in al, 73h
1567 COM64_S_BYTE_REG eax
1568 COM64_S_CHAR ' '
1569 inc ecx
1570 cmp ecx, 128
1571 jb .loop2
1572
1573%if 0
1574 COM64_S_NEWLINE
1575 COM64_S_CHAR 'c'
1576 COM64_S_CHAR 'm'
1577 COM64_S_CHAR 'o'
1578 COM64_S_CHAR 's'
1579 COM64_S_CHAR '2'
1580 COM64_S_CHAR ':'
1581 xor ecx, ecx
1582.loop3:
1583 mov al, cl
1584 out 74h, al
1585 in al, 75h
1586 COM64_S_BYTE_REG eax
1587 COM64_S_CHAR ' '
1588 inc ecx
1589 cmp ecx, 128
1590 jb .loop3
1591
1592 COM64_S_NEWLINE
1593 COM64_S_CHAR 'c'
1594 COM64_S_CHAR 'm'
1595 COM64_S_CHAR 'o'
1596 COM64_S_CHAR 's'
1597 COM64_S_CHAR '3'
1598 COM64_S_CHAR ':'
1599 xor ecx, ecx
1600.loop4:
1601 mov al, cl
1602 out 72h, al
1603 in al, 73h
1604 COM64_S_BYTE_REG eax
1605 COM64_S_CHAR ' '
1606 inc ecx
1607 cmp ecx, 128
1608 jb .loop4
1609
1610 COM64_S_NEWLINE
1611%endif
1612
1613 add rsp, 16
1614 pop rdi
1615 pop rsi
1616 pop rcx
1617 pop rdx
1618 pop rax
1619 ret
1620ENDPROC vmm64On32DumpCmos
1621 %endif
1622
1623%endif ; VBOX_WITH_64ON32_IDT
1624
1625
1626
1627;
1628;
1629; Back to switcher code.
1630; Back to switcher code.
1631; Back to switcher code.
1632;
1633;
1634
1635
1636
1637;;
1638; Trampoline for doing a call when starting the hypervisor execution.
1639;
1640; Push any arguments to the routine.
1641; Push the argument frame size (cArg * 4).
1642; Push the call target (_cdecl convention).
1643; Push the address of this routine.
1644;
1645;
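; Note: in this 64-on-32 switcher the trampoline is not actually used for
; calls; the body below simply breakpoints (int3) if it is ever reached.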
1646BITS 64
1647ALIGNCODE(16)
1648BEGINPROC vmmRCCallTrampoline
1649%ifdef DEBUG_STUFF
1650 COM64_S_CHAR 'c'
1651 COM64_S_CHAR 't'
1652 COM64_S_CHAR '!'
1653%endif
1654 int3
1655ENDPROC vmmRCCallTrampoline
1656
1657
1658;;
1659; The C interface.
1660;
1661BITS 64
1662ALIGNCODE(16)
1663BEGINPROC vmmRCToHost
1664%ifdef DEBUG_STUFF
1665 push rsi
1666 COM_NEWLINE
1667 COM_CHAR 'b'
1668 COM_CHAR 'a'
1669 COM_CHAR 'c'
1670 COM_CHAR 'k'
1671 COM_CHAR '!'
1672 COM_NEWLINE
1673 pop rsi
1674%endif
1675 int3
1676ENDPROC vmmRCToHost
1677
1678;;
1679; vmmRCToHostAsm
1680;
1681; This is an alternative entry point which we'll be using
1682; when we have saved the guest state already or we haven't
1683; been messing with the guest at all.
1684;
1685; @param rbp The offset of the CPUMCPU structure relative to CPUM.
1686; @param
1687;
1688BITS 64
1689ALIGNCODE(16)
1690BEGINPROC vmmRCToHostAsm
1691NAME(vmmRCToHostAsmNoReturn):
1692 ;; We're still in the intermediate memory context!
1693
1694 ;;
1695 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
1696 ;;
1697 jmp far [NAME(fpIDEnterTarget) wrt rip]
1698
1699; 16:32 Pointer to IDEnterTarget.
1700NAME(fpIDEnterTarget):
1701 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
1702dd 0
1703 FIXUP FIX_HYPER_CS, 0
1704dd 0
1705
1706 ; We're now on identity mapped pages!
1707ALIGNCODE(16)
1708GLOBALNAME IDExitTarget
1709BITS 32
1710 DEBUG32_CHAR('1')
1711
1712 ; 1. Deactivate long mode by turning off paging.
1713 mov ebx, cr0
1714 and ebx, ~X86_CR0_PG
1715 mov cr0, ebx
1716 DEBUG32_CHAR('2')
1717
1718 ; 2. Load intermediate page table.
1719 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
1720 mov edx, 0ffffffffh
1721 mov cr3, edx
1722 DEBUG32_CHAR('3')
1723
1724 ; 3. Disable long mode.
1725 mov ecx, MSR_K6_EFER
1726 rdmsr
1727 DEBUG32_CHAR('5')
1728 and eax, ~(MSR_K6_EFER_LME)
1729 wrmsr
1730 DEBUG32_CHAR('6')
1731
1732%ifndef NEED_PAE_ON_HOST
1733 ; 3b. Disable PAE.
1734 mov eax, cr4
1735 and eax, ~X86_CR4_PAE
1736 mov cr4, eax
1737 DEBUG32_CHAR('7')
1738%endif
1739
1740 ; 4. Enable paging.
1741 or ebx, X86_CR0_PG
1742 mov cr0, ebx
1743 jmp short just_a_jump
1744just_a_jump:
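 ; (The short jump above flushes prefetched instructions after toggling
 ; CR0.PG - long-standing practice when switching paging on or off.)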
1745 DEBUG32_CHAR('8')
1746
1747 ;;
1748 ;; 5. Jump to the host mapping of the switcher code.
1749 ;;
1750 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
1751 jmp near NAME(ICExitTarget)
1752
1753 ;;
1754 ;; When we arrive at this label we're at the host mapping of the
1755 ;; switcher code, but with intermediate page tables.
1756 ;;
1757BITS 32
1758ALIGNCODE(16)
1759GLOBALNAME ICExitTarget
1760 DEBUG32_CHAR('9')
1761 ;DEBUG_CMOS_TRASH_AL 70h
1762
1763 ; load the hypervisor data selector into ds & es
1764 FIXUP FIX_HYPER_DS, 1
1765 mov eax, 0ffffh
1766 mov ds, eax
1767 mov es, eax
1768 DEBUG32_CHAR('a')
1769
1770 FIXUP FIX_GC_CPUM_OFF, 1, 0
1771 mov edx, 0ffffffffh
1772 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1773
1774 DEBUG32_CHAR('b')
1775 mov esi, [edx + CPUMCPU.Host.cr3]
1776 mov cr3, esi
1777 DEBUG32_CHAR('c')
1778
1779 ;; now we're in host memory context, let's restore regs
1780 FIXUP FIX_HC_CPUM_OFF, 1, 0
1781 mov edx, 0ffffffffh
1782 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1783 DEBUG32_CHAR('e')
1784
1785 ; restore the host EFER
1786 mov ebx, edx
1787 mov ecx, MSR_K6_EFER
1788 mov eax, [ebx + CPUMCPU.Host.efer]
1789 mov edx, [ebx + CPUMCPU.Host.efer + 4]
1790 DEBUG32_CHAR('f')
1791 wrmsr
1792 mov edx, ebx
1793 DEBUG32_CHAR('g')
1794
1795 ; activate host gdt and idt
1796 lgdt [edx + CPUMCPU.Host.gdtr]
1797 DEBUG32_CHAR('0')
1798 lidt [edx + CPUMCPU.Host.idtr]
1799 DEBUG32_CHAR('1')
1800
1801 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1802 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1803 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
1804 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1805 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1806 and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
1807 ltr word [edx + CPUMCPU.Host.tr]
1808
1809 ; activate ldt
1810 DEBUG32_CHAR('2')
1811 lldt [edx + CPUMCPU.Host.ldtr]
1812
1813 ; Restore segment registers
1814 mov eax, [edx + CPUMCPU.Host.ds]
1815 mov ds, eax
1816 mov eax, [edx + CPUMCPU.Host.es]
1817 mov es, eax
1818 mov eax, [edx + CPUMCPU.Host.fs]
1819 mov fs, eax
1820 mov eax, [edx + CPUMCPU.Host.gs]
1821 mov gs, eax
1822 ; restore stack
1823 lss esp, [edx + CPUMCPU.Host.esp]
1824
1825 ; Control registers.
1826 mov ecx, [edx + CPUMCPU.Host.cr4]
1827 mov cr4, ecx
1828 mov ecx, [edx + CPUMCPU.Host.cr0]
1829 mov cr0, ecx
1830 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
1831 ;mov cr2, ecx
1832
1833 ; restore general registers.
1834 mov edi, [edx + CPUMCPU.Host.edi]
1835 mov esi, [edx + CPUMCPU.Host.esi]
1836 mov ebx, [edx + CPUMCPU.Host.ebx]
1837 mov ebp, [edx + CPUMCPU.Host.ebp]
1838
1839 ; store the return code in eax
1840 DEBUG_CMOS_TRASH_AL 79h
1841 mov eax, [edx + CPUMCPU.u32RetCode]
1842 retf
1843ENDPROC vmmRCToHostAsm
1844
1845
1846GLOBALNAME End
1847;
1848; The description string (in the text section).
1849;
1850NAME(Description):
1851 db SWITCHER_DESCRIPTION
1852 db 0
1853
1854extern NAME(Relocate)
1855
1856;
1857; End the fixup records.
1858;
1859BEGINDATA
1860 db FIX_THE_END ; final entry.
1861GLOBALNAME FixupsEnd
1862
1863;;
1864; The switcher definition structure.
1865ALIGNDATA(16)
1866GLOBALNAME Def
1867 istruc VMMSWITCHERDEF
1868 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1869 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1870 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1871 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1872 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1873 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1874 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1875 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1876 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1877 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1878 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1879 ; disasm help
1880 at VMMSWITCHERDEF.offHCCode0, dd 0
1881 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1882 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
1883 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
1884 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1885 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
1886 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1887 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
1888%ifdef VBOX_WITH_64ON32_IDT ; Hack! Use offGCCode to find the IDT.
1889 at VMMSWITCHERDEF.offGCCode, dd NAME(vmm64On32Idt) - NAME(Start)
1890%else
1891 at VMMSWITCHERDEF.offGCCode, dd 0
1892%endif
1893 at VMMSWITCHERDEF.cbGCCode, dd 0
1894
1895 iend
1896