Changeset 41933 in vbox for trunk/src/VBox/VMM/VMMSwitcher
Timestamp: Jun 27, 2012, 6:37:33 PM
svn:sync-xref-src-repo-rev: 78801
Location: trunk/src/VBox/VMM/VMMSwitcher
Files: 11 edited

Summary of the visible changes: the copyright years are bumped to 2006-2012 in every switcher source, and the VMMGCGuestToHostAsmGuestCtx and VMMGCGuestToHostAsmHyperCtx entry points are removed together with their VMMSWITCHERDEF offset fields, leaving VMMGCGuestToHostAsm as the single guest-context-to-host-context entry point. The vmmGCGuestToHostAsm_EIPDone and vmmGCGuestToHostAsm_SkipHyperRegs labels those entry points jumped to are folded away as well.
trunk/src/VBox/VMM/VMMSwitcher/32BitTo32Bit.asm
r28800 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/32BitToAMD64.asm
r28800 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/32BitToPAE.asm
r28800 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/AMD64To32Bit.asm
r28800 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm
r28800 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
r41906 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
    …
     ;;
    -; VMMGCGuestToHostAsmGuestCtx
    -;
    -; Switches from Guest Context to Host Context.
    -; Of course it's only called from within the GC.
    +; VMMGCGuestToHostAsm
    +;
    +; This is an alternative entry point which we'll be using
    +; when the we have saved the guest state already or we haven't
    +; been messing with the guest at all.
     ;
     ; @param eax      Return code.
    -; @param esp + 4  Pointer to CPUMCTXCORE.
    -;
    -; @remark ASSUMES interrupts disabled.
    -;
    -ALIGNCODE(16)
    -BEGINPROC VMMGCGuestToHostAsmGuestCtx
    -    DEBUG_CHAR('~')
    +; @uses eax, edx, ecx (or it may use them in the future)
    +;
    +ALIGNCODE(16)
    +BEGINPROC VMMGCGuestToHostAsm
    +    DEBUG_CHAR('%')

     %ifdef VBOX_WITH_STATISTICS
    …
         ;
    -    ; Load the CPUM CPU pointer.
    +    ; Load the CPUM pointer.
         ;
         FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
         mov edx, 0ffffffffh

    -    ; Skip return address (assumes called!)
    -    lea esp, [esp + 4]
    -
    -    ;
    -    ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
    -    ;
    -    ; general purpose registers
    -    push eax ; save return code.
    -    mov eax, [esp + 4 + CPUMCTXCORE.edi]
    -    mov [edx + CPUMCPU.Guest.edi], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.esi]
    -    mov [edx + CPUMCPU.Guest.esi], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.ebp]
    -    mov [edx + CPUMCPU.Guest.ebp], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.eax]
    -    mov [edx + CPUMCPU.Guest.eax], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.ebx]
    -    mov [edx + CPUMCPU.Guest.ebx], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.edx]
    -    mov [edx + CPUMCPU.Guest.edx], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.ecx]
    -    mov [edx + CPUMCPU.Guest.ecx], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.esp]
    -    mov [edx + CPUMCPU.Guest.esp], eax
    -    ; selectors
    -    mov eax, [esp + 4 + CPUMCTXCORE.ss.Sel]
    -    mov [edx + CPUMCPU.Guest.ss.Sel], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.gs.Sel]
    -    mov [edx + CPUMCPU.Guest.gs.Sel], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.fs.Sel]
    -    mov [edx + CPUMCPU.Guest.fs.Sel], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.es.Sel]
    -    mov [edx + CPUMCPU.Guest.es.Sel], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.ds.Sel]
    -    mov [edx + CPUMCPU.Guest.ds.Sel], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.cs.Sel]
    -    mov [edx + CPUMCPU.Guest.cs.Sel], eax
    -    ; flags
    -    mov eax, [esp + 4 + CPUMCTXCORE.eflags]
    -    mov [edx + CPUMCPU.Guest.eflags], eax
    -    ; eip
    -    mov eax, [esp + 4 + CPUMCTXCORE.eip]
    -    mov [edx + CPUMCPU.Guest.eip], eax
    -    ; jump to common worker code.
    -    pop eax ; restore return code.
    -
    -    add esp, CPUMCTXCORE_size ; skip CPUMCTXCORE structure
    -
    -    jmp vmmGCGuestToHostAsm_EIPDone
    -ENDPROC VMMGCGuestToHostAsmGuestCtx
    -
    -
    -;;
    -; VMMGCGuestToHostAsmHyperCtx
    -;
    -; This is an alternative entry point which we'll be using
    -; when the we have the hypervisor context and need to save
    -; that before going to the host.
    -;
    -; This is typically useful when abandoning the hypervisor
    -; because of a trap and want the trap state to be saved.
    -;
    -; @param eax   Return code.
    -; @param ecx   Points to CPUMCTXCORE.
    -; @uses eax,edx,ecx
    -ALIGNCODE(16)
    -BEGINPROC VMMGCGuestToHostAsmHyperCtx
    -    DEBUG_CHAR('#')
    -
    -%ifdef VBOX_WITH_STATISTICS
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    -    mov edx, 0ffffffffh
    -    STAM32_PROFILE_ADV_STOP edx
    -
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    -    mov edx, 0ffffffffh
    -    STAM32_PROFILE_ADV_START edx
    -
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    -    mov edx, 0ffffffffh
    -    STAM32_PROFILE_ADV_START edx
    -%endif
    -
    -    ;
    -    ; Load the CPUM pointer.
    -    ;
    -    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    -    mov edx, 0ffffffffh
    -
    -    push eax ; save return code.
    -    ; general purpose registers
    -    mov eax, [ecx + CPUMCTXCORE.edi]
    -    mov [edx + CPUMCPU.Hyper.edi], eax
    -    mov eax, [ecx + CPUMCTXCORE.esi]
    -    mov [edx + CPUMCPU.Hyper.esi], eax
    -    mov eax, [ecx + CPUMCTXCORE.ebp]
    -    mov [edx + CPUMCPU.Hyper.ebp], eax
    -    mov eax, [ecx + CPUMCTXCORE.eax]
    -    mov [edx + CPUMCPU.Hyper.eax], eax
    -    mov eax, [ecx + CPUMCTXCORE.ebx]
    -    mov [edx + CPUMCPU.Hyper.ebx], eax
    -    mov eax, [ecx + CPUMCTXCORE.edx]
    -    mov [edx + CPUMCPU.Hyper.edx], eax
    -    mov eax, [ecx + CPUMCTXCORE.ecx]
    -    mov [edx + CPUMCPU.Hyper.ecx], eax
    -    mov eax, [ecx + CPUMCTXCORE.esp]
    -    mov [edx + CPUMCPU.Hyper.esp], eax
    -    ; selectors
    -    mov eax, [ecx + CPUMCTXCORE.ss.Sel]
    -    mov [edx + CPUMCPU.Hyper.ss.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.gs.Sel]
    -    mov [edx + CPUMCPU.Hyper.gs.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.fs.Sel]
    -    mov [edx + CPUMCPU.Hyper.fs.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.es.Sel]
    -    mov [edx + CPUMCPU.Hyper.es.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.ds.Sel]
    -    mov [edx + CPUMCPU.Hyper.ds.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.cs.Sel]
    -    mov [edx + CPUMCPU.Hyper.cs.Sel], eax
    -    ; flags
    -    mov eax, [ecx + CPUMCTXCORE.eflags]
    -    mov [edx + CPUMCPU.Hyper.eflags], eax
    -    ; eip
    -    mov eax, [ecx + CPUMCTXCORE.eip]
    -    mov [edx + CPUMCPU.Hyper.eip], eax
    -    ; jump to common worker code.
    -    pop eax ; restore return code.
    -    jmp vmmGCGuestToHostAsm_SkipHyperRegs
    -
    -ENDPROC VMMGCGuestToHostAsmHyperCtx
    -
    -
    -;;
    -; VMMGCGuestToHostAsm
    -;
    -; This is an alternative entry point which we'll be using
    -; when the we have saved the guest state already or we haven't
    -; been messing with the guest at all.
    -;
    -; @param eax   Return code.
    -; @uses eax, edx, ecx (or it may use them in the future)
    -;
    -ALIGNCODE(16)
    -BEGINPROC VMMGCGuestToHostAsm
    -    DEBUG_CHAR('%')
    -
    -%ifdef VBOX_WITH_STATISTICS
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    -    mov edx, 0ffffffffh
    -    STAM32_PROFILE_ADV_STOP edx
    -
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    -    mov edx, 0ffffffffh
    -    STAM32_PROFILE_ADV_START edx
    -
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    -    mov edx, 0ffffffffh
    -    STAM32_PROFILE_ADV_START edx
    -%endif
    -
    -    ;
    -    ; Load the CPUM pointer.
    -    ;
    -    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    -    mov edx, 0ffffffffh
    -
         pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
    -    jmp short vmmGCGuestToHostAsm_EIPDone
    -
    -ALIGNCODE(16)
    -vmmGCGuestToHostAsm_EIPDone:
         ; general registers which we care about.
         mov dword [edx + CPUMCPU.Hyper.ebx], ebx
    …
         ; special registers which may change.
    -vmmGCGuestToHostAsm_SkipHyperRegs:
     %ifdef STRICT_IF
         pushf
    …
         at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
         at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
    -    at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
    -    at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
         ; disasm help
         at VMMSWITCHERDEF.offHCCode0, dd 0
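The FIXUP-plus-placeholder pair that recurs throughout this file (including in the code removed above) is the switcher relocation idiom. A minimal sketch of it, assuming the usual semantics (the fixup record format and the relocator itself live elsewhere in the VBox sources and are not part of this diff):

    ; Sketch, not VBox source. FIXUP emits a relocation record for the
    ; next instruction; the second operand (1) is assumed to be the byte
    ; offset of the immediate field within that instruction. The
    ; 0ffffffffh is only a placeholder, which the switcher relocator
    ; overwrites with the real guest-context address at install time.
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh         ; becomes: mov edx, <GC address of CPUMCPU>

After patching, edx points at the per-CPU CPUM data from guest context, which is why both the removed and the surviving exit paths load it before touching the CPUMCPU.Hyper fields.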
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r41907 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
    …
     ENDPROC VMMGCGuestToHostAsm

    -;;
    -; VMMGCGuestToHostAsmHyperCtx
    -;
    -; This is an alternative entry point which we'll be using
    -; when the we have the hypervisor context and need to save
    -; that before going to the host.
    -;
    -; This is typically useful when abandoning the hypervisor
    -; because of a trap and want the trap state to be saved.
    -;
    -; @param eax   Return code.
    -; @param ecx   Points to CPUMCTXCORE.
    -; @uses eax,edx,ecx
    -ALIGNCODE(16)
    -BEGINPROC VMMGCGuestToHostAsmHyperCtx
    -    int3
    -
    -;;
    -; VMMGCGuestToHostAsmGuestCtx
    -;
    -; Switches from Guest Context to Host Context.
    -; Of course it's only called from within the GC.
    -;
    -; @param eax      Return code.
    -; @param esp + 4  Pointer to CPUMCTXCORE.
    -;
    -; @remark ASSUMES interrupts disabled.
    -;
    -ALIGNCODE(16)
    -BEGINPROC VMMGCGuestToHostAsmGuestCtx
    -    int3
    -
     GLOBALNAME End
     ;
     ; The description string (in the text section).
    …
         at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
         at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
    -    at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
    -    at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
         ; disasm help
         at VMMSWITCHERDEF.offHCCode0, dd 0
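The final hunk above trims the switcher's self-description table. As a hedged sketch of what that table is: each switcher ends with a VMMSWITCHERDEF instance emitted via NASM's standard istruc/at macros, storing entry-point offsets relative to the image start so the VMM can locate them wherever the code is mapped. Only the fields visible in this diff are shown; the full structure layout is defined in a header outside this changeset:

    istruc VMMSWITCHERDEF
        ; offsets are image-relative: entry label minus the Start label
        at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
        ; as of r41933 the HyperCtx/GuestCtx entries are no longer emitted
    iend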
trunk/src/VBox/VMM/VMMSwitcher/PAETo32Bit.asm
r28800 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/PAEToAMD64.asm
r28800 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/PAEToPAE.asm
r28800 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
r41907 → r41933:

     ;
    -; Copyright (C) 2006-2007 Oracle Corporation
    +; Copyright (C) 2006-2012 Oracle Corporation
     ;
     ; This file is part of VirtualBox Open Source Edition (OSE), as
    …
     ;;
    -; VMMGCGuestToHostAsmGuestCtx
    -;
    -; Switches from Guest Context to Host Context.
    -; Of course it's only called from within the GC.
    -;
    -; @param eax      Return code.
    -; @param esp + 4  Pointer to CPUMCTXCORE.
    -;
    -; @remark ASSUMES interrupts disabled.
    -;
    -ALIGNCODE(16)
    -BEGINPROC VMMGCGuestToHostAsmGuestCtx
    -    DEBUG_CHAR('~')
    -
    -%ifdef VBOX_WITH_STATISTICS
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    -    mov edx, 0ffffffffh
    -    STAM_PROFILE_ADV_STOP edx
    -
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    -    mov edx, 0ffffffffh
    -    STAM_PROFILE_ADV_START edx
    -
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    -    mov edx, 0ffffffffh
    -    STAM_PROFILE_ADV_START edx
    -%endif
    -
    -    ;
    -    ; Load the CPUMCPU pointer.
    -    ;
    -    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    -    mov edx, 0ffffffffh
    -
    -    ; Skip return address (assumes called!)
    -    lea esp, [esp + 4]
    -
    -    ;
    -    ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
    -    ;
    -    ; general purpose registers.
    -    push eax
    -
    -    mov eax, [esp + 4 + CPUMCTXCORE.eax]
    -    mov [edx + CPUMCPU.Guest.eax], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.ecx]
    -    mov [edx + CPUMCPU.Guest.ecx], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.edx]
    -    mov [edx + CPUMCPU.Guest.edx], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.ebx]
    -    mov [edx + CPUMCPU.Guest.ebx], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.esp]
    -    mov [edx + CPUMCPU.Guest.esp], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.ebp]
    -    mov [edx + CPUMCPU.Guest.ebp], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.esi]
    -    mov [edx + CPUMCPU.Guest.esi], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.edi]
    -    mov [edx + CPUMCPU.Guest.edi], eax
    -    mov eax, dword [esp + 4 + CPUMCTXCORE.es.Sel]
    -    mov dword [edx + CPUMCPU.Guest.es.Sel], eax
    -    mov eax, dword [esp + 4 + CPUMCTXCORE.cs.Sel]
    -    mov dword [edx + CPUMCPU.Guest.cs.Sel], eax
    -    mov eax, dword [esp + 4 + CPUMCTXCORE.ss.Sel]
    -    mov dword [edx + CPUMCPU.Guest.ss.Sel], eax
    -    mov eax, dword [esp + 4 + CPUMCTXCORE.ds.Sel]
    -    mov dword [edx + CPUMCPU.Guest.ds.Sel], eax
    -    mov eax, dword [esp + 4 + CPUMCTXCORE.fs.Sel]
    -    mov dword [edx + CPUMCPU.Guest.fs.Sel], eax
    -    mov eax, dword [esp + 4 + CPUMCTXCORE.gs.Sel]
    -    mov dword [edx + CPUMCPU.Guest.gs.Sel], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.eflags]
    -    mov dword [edx + CPUMCPU.Guest.eflags], eax
    -    mov eax, [esp + 4 + CPUMCTXCORE.eip]
    -    mov dword [edx + CPUMCPU.Guest.eip], eax
    -    pop eax
    -
    -    add esp, CPUMCTXCORE_size ; skip CPUMCTXCORE structure
    -
    -    jmp vmmGCGuestToHostAsm_EIPDone
    -ENDPROC VMMGCGuestToHostAsmGuestCtx
    -
    -
    -;;
    -; VMMGCGuestToHostAsmHyperCtx
    -;
    -; This is an alternative entry point which we'll be using
    -; when the we have the hypervisor context and need to save
    -; that before going to the host.
    -;
    -; This is typically useful when abandoning the hypervisor
    -; because of a trap and want the trap state to be saved.
    -;
    -; @param eax   Return code.
    -; @param ecx   Points to CPUMCTXCORE.
    -; @uses eax,edx,ecx
    -ALIGNCODE(16)
    -BEGINPROC VMMGCGuestToHostAsmHyperCtx
    -    DEBUG_CHAR('#')
    -
    -%ifdef VBOX_WITH_STATISTICS
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    -    mov edx, 0ffffffffh
    -    STAM_PROFILE_ADV_STOP edx
    -
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    -    mov edx, 0ffffffffh
    -    STAM_PROFILE_ADV_START edx
    -
    -    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    -    mov edx, 0ffffffffh
    -    STAM_PROFILE_ADV_START edx
    -%endif
    -
    -    ;
    -    ; Load the CPUM pointer.
    -    ;
    -    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    -    mov edx, 0ffffffffh
    -
    -    push eax ; save return code.
    -    ; general purpose registers
    -    mov eax, [ecx + CPUMCTXCORE.edi]
    -    mov [edx + CPUMCPU.Hyper.edi], eax
    -    mov eax, [ecx + CPUMCTXCORE.esi]
    -    mov [edx + CPUMCPU.Hyper.esi], eax
    -    mov eax, [ecx + CPUMCTXCORE.ebp]
    -    mov [edx + CPUMCPU.Hyper.ebp], eax
    -    mov eax, [ecx + CPUMCTXCORE.eax]
    -    mov [edx + CPUMCPU.Hyper.eax], eax
    -    mov eax, [ecx + CPUMCTXCORE.ebx]
    -    mov [edx + CPUMCPU.Hyper.ebx], eax
    -    mov eax, [ecx + CPUMCTXCORE.edx]
    -    mov [edx + CPUMCPU.Hyper.edx], eax
    -    mov eax, [ecx + CPUMCTXCORE.ecx]
    -    mov [edx + CPUMCPU.Hyper.ecx], eax
    -    mov eax, [ecx + CPUMCTXCORE.esp]
    -    mov [edx + CPUMCPU.Hyper.esp], eax
    -    ; selectors
    -    mov eax, [ecx + CPUMCTXCORE.ss.Sel]
    -    mov [edx + CPUMCPU.Hyper.ss.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.gs.Sel]
    -    mov [edx + CPUMCPU.Hyper.gs.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.fs.Sel]
    -    mov [edx + CPUMCPU.Hyper.fs.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.es.Sel]
    -    mov [edx + CPUMCPU.Hyper.es.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.ds.Sel]
    -    mov [edx + CPUMCPU.Hyper.ds.Sel], eax
    -    mov eax, [ecx + CPUMCTXCORE.cs.Sel]
    -    mov [edx + CPUMCPU.Hyper.cs.Sel], eax
    -    ; flags
    -    mov eax, [ecx + CPUMCTXCORE.eflags]
    -    mov [edx + CPUMCPU.Hyper.eflags], eax
    -    ; eip
    -    mov eax, [ecx + CPUMCTXCORE.eip]
    -    mov [edx + CPUMCPU.Hyper.eip], eax
    -    ; jump to common worker code.
    -    pop eax ; restore return code.
    -    jmp vmmGCGuestToHostAsm_SkipHyperRegs
    -
    -ENDPROC VMMGCGuestToHostAsmHyperCtx
    -
    -
    -;;
     ; VMMGCGuestToHostAsm
     ;
    …
         pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
    -    jmp short vmmGCGuestToHostAsm_EIPDone
    -
    -ALIGNCODE(16)
    -vmmGCGuestToHostAsm_EIPDone:
         ; general registers which we care about.
         mov dword [edx + CPUMCPU.Hyper.ebx], ebx
    …
         ; special registers which may change.
    -vmmGCGuestToHostAsm_SkipHyperRegs:
         ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
         sldt [edx + CPUMCPU.Hyper.ldtr.Sel]
    …
         at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
         at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
    -    at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
    -    at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
         ; disasm help
         at VMMSWITCHERDEF.offHCCode0, dd 0
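Each of the deleted entry points opened with the same VBOX_WITH_STATISTICS bracket that the surviving VMMGCGuestToHostAsm keeps. A sketch of that bracket, with the macro and member names taken from the diff (the macro internals belong to VBox's statistics manager, STAM, and are not shown in this changeset):

    %ifdef VBOX_WITH_STATISTICS
        ; leaving guest context: stop the "total time in GC" sample
        FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
        mov edx, 0ffffffffh
        STAM_PROFILE_ADV_STOP edx

        ; and start timing the GC -> HC switch itself
        FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
        mov edx, 0ffffffffh
        STAM_PROFILE_ADV_START edx
    %endif

Dropping the duplicated entry points removes two copies of this bracket per switcher.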