- Timestamp: Jun 27, 2012 6:37:33 PM
- Location: trunk
- Files: 18 edited
trunk/include/VBox/vmm/vm.h
r41905 -> r41933:

     uint32_t offVMCPU;

-    /** Reserved; alignment. */
-    uint32_t u32Reserved[5];
-
-    /** @name Public VMM Switcher APIs
-     * @{ */
     /**
-     * Assembly switch entry point for returning to host context.
-     * This function will clean up the stack frame.
+     * VMMSwitcher assembly entry point returning to host context.
      *
-     * @param eax The return code, register.
-     * @param Ctx The guest core context.
-     * @remark Assume interrupts disabled.
-     */
-    RTRCPTR pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;
-
-    /**
-     * Assembly switch entry point for returning to host context.
-     *
-     * This is an alternative entry point which we'll be using when the we have the
-     * hypervisor context and need to save that before going to the host.
-     *
-     * This is typically useful when abandoning the hypervisor because of a trap
-     * and want the trap state to be saved.
-     *
-     * @param eax The return code, register.
-     * @param ecx Pointer to the hypervisor core context, register.
-     * @remark Assume interrupts disabled.
-     */
-    RTRCPTR pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;
-
-    /**
-     * Assembly switch entry point for returning to host context.
-     *
-     * This is an alternative to the two *Ctx APIs and implies that the context has already
-     * been saved, or that it's just a brief return to HC and that the caller intends to resume
-     * whatever it is doing upon 'return' from this call.
+     * Depending on how the host handles the rc status given in @a eax, this may
+     * return and let the caller resume whatever it was doing prior to the call.
+     * This method pointer lives here because TRPM needs it.
      *
      * @param eax The return code, register.
…
      */
     RTRCPTR pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
-    /** @} */
-
-
-    /** @name Various VM data owned by VM.
-     * @{ */
-    RTTHREAD uPadding1;
-    /** The native handle of ThreadEMT. Getting the native handle
-     * is generally faster than getting the IPRT one (except on OS/2 :-). */
-    RTNATIVETHREAD uPadding2;
-    /** @} */
-

     /** @name Various items that are frequently accessed.
…
     /** Raw-mode Context VM Pointer. */
     RCPTRTYPE(RTTRACEBUF) hTraceBufRC;
-    /** Alignment padding */
-    uint32_t uPadding3;
     /** Ring-3 Host Context VM Pointer. */
     R3PTRTYPE(RTTRACEBUF) hTraceBufR3;
…
 #if HC_ARCH_BITS == 32
     /** Alignment padding.. */
-    uint32_t uPadding4;
+    uint32_t uPadding2;
 #endif
…
     /** @} */

-#if HC_ARCH_BITS != 64
     /** Padding - the unions must be aligned on a 64 bytes boundary and the unions
      * must start at the same offset on both 64-bit and 32-bit hosts. */
-    uint8_t abAlignment1[HC_ARCH_BITS == 32 ? 32 : 0];
-#endif
+    uint8_t abAlignment3[(HC_ARCH_BITS == 32 ? 24 : 0) + 48];

     /** CPUM part. */
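The vm.h hunk above replaces the old abAlignment1 array with abAlignment3 so that the unions that follow keep their 64-byte alignment and start at the same offset on 32-bit and 64-bit hosts. Below is a minimal sketch of how that kind of layout invariant can be pinned down at compile time; the struct and the field sizes are illustrative stand-ins, not the real VM structure.

    /* Illustrative stand-in, not VirtualBox code: leading fields sized so the
     * union lands on a 64-byte boundary on either host width. */
    #include <stddef.h>
    #include <stdint.h>

    #if UINTPTR_MAX == UINT32_MAX
    # define HC_ARCH_BITS 32
    #else
    # define HC_ARCH_BITS 64
    #endif

    typedef struct VMLIKE
    {
        void    *pvRing3;           /* host pointer: 4 or 8 bytes */
        void    *pvRing0;           /* host pointer: 4 or 8 bytes */
        uint32_t cbSelf;
        uint32_t offVMCPU;
        /* 16 bytes so far on 32-bit hosts, 24 on 64-bit ones; the conditional
         * term makes both variants come out at exactly 64. */
        uint8_t  abAlignment[(HC_ARCH_BITS == 32 ? 8 : 0) + 40];
        union
        {
            uint8_t padding[64];    /* stands in for the CPUM part */
        } cpum;
    } VMLIKE;

    /* C11 compile-time check: a layout regression breaks the build instead of
     * corrupting state at run time. */
    _Static_assert(offsetof(VMLIKE, cpum) % 64 == 0, "cpum union must be 64-byte aligned");

    int main(void) { return 0; }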
trunk/include/VBox/vmm/vm.mac
r41905 -> r41933:

     .cbSelf                 resd 1
     .offVMCPU               resd 1
-    .u32Reserved            resd 5
-
-    .pfnVMMGCGuestToHostAsmGuestCtx RTRCPTR_RES 1
-    .pfnVMMGCGuestToHostAsmHyperCtx RTRCPTR_RES 1
-    .pfnVMMGCGuestToHostAsm RTRCPTR_RES 1
-
-    .uPadding1              RTHCPTR_RES 1
-    .uPadding2              RTHCPTR_RES 1
-
+    .pfnVMMGCGuestToHostAsm resd 1
     .fRecompileUser         resb 1
     .fRecompileSupervisor   resb 1
…

     .hTraceBufRC            RTRCPTR_RES 1
-    .uPadding3              resd 1
     .hTraceBufR3            RTR3PTR_RES 1
     .hTraceBufR0            RTR0PTR_RES 1
…
 %endif
 %if HC_ARCH_BITS == 32
-    .abAlignment1           resb 30
+    .abAlignment3           resb 24
 %else
-;    .abAlignment1          resb 0
+;    .abAlignment3          resb 16
 %endif
trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp
r41800 -> r41933:

     pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offGCCallTrampoline;
     pVM->pfnVMMGCGuestToHostAsm = RCPtr + pSwitcher->offGCGuestToHostAsm;
-    pVM->pfnVMMGCGuestToHostAsmHyperCtx = RCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
-    pVM->pfnVMMGCGuestToHostAsmGuestCtx = RCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;

     // AssertFailed();
…
     if (pSwitcher->offGCGuestToHostAsm == offCode)
         RTLogPrintf(" *GCGuestToHostAsm:\n");
-    if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
-        RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
-    if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
-        RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");

     /* disas */
…
     pVM->vmm.s.pfnHostToGuestR0 = pbCodeR0 + pSwitcher->offR0HostToGuest;

-    RTGCPTR GCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
-    pVM->vmm.s.pfnGuestToHostRC = GCPtr + pSwitcher->offGCGuestToHost;
-    pVM->vmm.s.pfnCallTrampolineRC = GCPtr + pSwitcher->offGCCallTrampoline;
-    pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
-    pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
-    pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
+    RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
+    pVM->vmm.s.pfnGuestToHostRC = RCPtr + pSwitcher->offGCGuestToHost;
+    pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offGCCallTrampoline;
+    pVM->pfnVMMGCGuestToHostAsm = RCPtr + pSwitcher->offGCGuestToHostAsm;
     return VINF_SUCCESS;
 }
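Besides dropping the two *Ctx entry points, the last hunk above fixes the relocation code to compute the raw-mode addresses with an RTRCPTR (a 32-bit raw-mode context pointer) instead of an RTGCPTR. The sketch below shows the base-plus-offset relocation pattern the kept code uses; the type and structure names are hypothetical stand-ins, not the actual VirtualBox implementation.

    /* Hypothetical sketch of relocating entry points into a code blob that
     * has been mapped at a new base address. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t RCPTR;            /* raw-mode context addresses are 32-bit */

    typedef struct SWITCHERDESC        /* per-switcher table of entry offsets   */
    {
        uint32_t offGuestToHost;
        uint32_t offCallTrampoline;
    } SWITCHERDESC;

    typedef struct VMSKETCH            /* resolved raw-mode addresses           */
    {
        RCPTR pfnGuestToHostRC;
        RCPTR pfnCallTrampolineRC;
    } VMSKETCH;

    static void relocateSwitcher(VMSKETCH *pVM, const SWITCHERDESC *pDesc, RCPTR RCPtrBase)
    {
        /* Every entry point is the blob base plus a constant offset, so a
         * relocation just recomputes the sums against the new base. */
        pVM->pfnGuestToHostRC    = RCPtrBase + pDesc->offGuestToHost;
        pVM->pfnCallTrampolineRC = RCPtrBase + pDesc->offCallTrampoline;
    }

    int main(void)
    {
        SWITCHERDESC Desc = { 0x40, 0x80 };
        VMSKETCH     Vm;
        relocateSwitcher(&Vm, &Desc, 0xa0000000u);
        printf("GuestToHost=%#x CallTrampoline=%#x\n",
               (unsigned)Vm.pfnGuestToHostRC, (unsigned)Vm.pfnCallTrampolineRC);
        return 0;
    }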
trunk/src/VBox/VMM/VMMSwitcher/32BitTo32Bit.asm
r28800 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/32BitToAMD64.asm
r28800 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/32BitToPAE.asm
r28800 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/AMD64To32Bit.asm
r28800 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm
r28800 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
r41906 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
…

 ;;
-; VMMGCGuestToHostAsmGuestCtx
-;
-; Switches from Guest Context to Host Context.
-; Of course it's only called from within the GC.
+; VMMGCGuestToHostAsm
+;
+; This is an alternative entry point which we'll be using
+; when the we have saved the guest state already or we haven't
+; been messing with the guest at all.
 ;
 ; @param eax Return code.
-; @param esp + 4 Pointer to CPUMCTXCORE.
-;
-; @remark ASSUMES interrupts disabled.
-;
-ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsmGuestCtx
-    DEBUG_CHAR('~')
+; @uses eax, edx, ecx (or it may use them in the future)
+;
+ALIGNCODE(16)
+BEGINPROC VMMGCGuestToHostAsm
+    DEBUG_CHAR('%')

 %ifdef VBOX_WITH_STATISTICS
…

     ;
-    ; Load the CPUMCPU pointer.
+    ; Load the CPUM pointer.
     ;
     FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
     mov edx, 0ffffffffh

-    ; Skip return address (assumes called!)
-    lea esp, [esp + 4]
-
-    ;
-    ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
-    ;
-    ; general purpose registers
-    push eax ; save return code.
-    mov eax, [esp + 4 + CPUMCTXCORE.edi]
-    mov [edx + CPUMCPU.Guest.edi], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.esi]
-    mov [edx + CPUMCPU.Guest.esi], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.ebp]
-    mov [edx + CPUMCPU.Guest.ebp], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.eax]
-    mov [edx + CPUMCPU.Guest.eax], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.ebx]
-    mov [edx + CPUMCPU.Guest.ebx], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.edx]
-    mov [edx + CPUMCPU.Guest.edx], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.ecx]
-    mov [edx + CPUMCPU.Guest.ecx], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.esp]
-    mov [edx + CPUMCPU.Guest.esp], eax
-    ; selectors
-    mov eax, [esp + 4 + CPUMCTXCORE.ss.Sel]
-    mov [edx + CPUMCPU.Guest.ss.Sel], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.gs.Sel]
-    mov [edx + CPUMCPU.Guest.gs.Sel], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.fs.Sel]
-    mov [edx + CPUMCPU.Guest.fs.Sel], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.es.Sel]
-    mov [edx + CPUMCPU.Guest.es.Sel], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.ds.Sel]
-    mov [edx + CPUMCPU.Guest.ds.Sel], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.cs.Sel]
-    mov [edx + CPUMCPU.Guest.cs.Sel], eax
-    ; flags
-    mov eax, [esp + 4 + CPUMCTXCORE.eflags]
-    mov [edx + CPUMCPU.Guest.eflags], eax
-    ; eip
-    mov eax, [esp + 4 + CPUMCTXCORE.eip]
-    mov [edx + CPUMCPU.Guest.eip], eax
-    ; jump to common worker code.
-    pop eax ; restore return code.
-
-    add esp, CPUMCTXCORE_size ; skip CPUMCTXCORE structure
-
-    jmp vmmGCGuestToHostAsm_EIPDone
-ENDPROC VMMGCGuestToHostAsmGuestCtx
-
-
-;;
-; VMMGCGuestToHostAsmHyperCtx
-;
-; This is an alternative entry point which we'll be using
-; when the we have the hypervisor context and need to save
-; that before going to the host.
-;
-; This is typically useful when abandoning the hypervisor
-; because of a trap and want the trap state to be saved.
-;
-; @param eax Return code.
-; @param ecx Points to CPUMCTXCORE.
-; @uses eax,edx,ecx
-ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsmHyperCtx
-    DEBUG_CHAR('#')
-
-%ifdef VBOX_WITH_STATISTICS
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
-    mov edx, 0ffffffffh
-    STAM32_PROFILE_ADV_STOP edx
-
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
-    mov edx, 0ffffffffh
-    STAM32_PROFILE_ADV_START edx
-
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
-    mov edx, 0ffffffffh
-    STAM32_PROFILE_ADV_START edx
-%endif
-
-    ;
-    ; Load the CPUM pointer.
-    ;
-    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
-    mov edx, 0ffffffffh
-
-    push eax ; save return code.
-    ; general purpose registers
-    mov eax, [ecx + CPUMCTXCORE.edi]
-    mov [edx + CPUMCPU.Hyper.edi], eax
-    mov eax, [ecx + CPUMCTXCORE.esi]
-    mov [edx + CPUMCPU.Hyper.esi], eax
-    mov eax, [ecx + CPUMCTXCORE.ebp]
-    mov [edx + CPUMCPU.Hyper.ebp], eax
-    mov eax, [ecx + CPUMCTXCORE.eax]
-    mov [edx + CPUMCPU.Hyper.eax], eax
-    mov eax, [ecx + CPUMCTXCORE.ebx]
-    mov [edx + CPUMCPU.Hyper.ebx], eax
-    mov eax, [ecx + CPUMCTXCORE.edx]
-    mov [edx + CPUMCPU.Hyper.edx], eax
-    mov eax, [ecx + CPUMCTXCORE.ecx]
-    mov [edx + CPUMCPU.Hyper.ecx], eax
-    mov eax, [ecx + CPUMCTXCORE.esp]
-    mov [edx + CPUMCPU.Hyper.esp], eax
-    ; selectors
-    mov eax, [ecx + CPUMCTXCORE.ss.Sel]
-    mov [edx + CPUMCPU.Hyper.ss.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.gs.Sel]
-    mov [edx + CPUMCPU.Hyper.gs.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.fs.Sel]
-    mov [edx + CPUMCPU.Hyper.fs.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.es.Sel]
-    mov [edx + CPUMCPU.Hyper.es.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.ds.Sel]
-    mov [edx + CPUMCPU.Hyper.ds.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.cs.Sel]
-    mov [edx + CPUMCPU.Hyper.cs.Sel], eax
-    ; flags
-    mov eax, [ecx + CPUMCTXCORE.eflags]
-    mov [edx + CPUMCPU.Hyper.eflags], eax
-    ; eip
-    mov eax, [ecx + CPUMCTXCORE.eip]
-    mov [edx + CPUMCPU.Hyper.eip], eax
-    ; jump to common worker code.
-    pop eax ; restore return code.
-    jmp vmmGCGuestToHostAsm_SkipHyperRegs
-
-ENDPROC VMMGCGuestToHostAsmHyperCtx
-
-
-;;
-; VMMGCGuestToHostAsm
-;
-; This is an alternative entry point which we'll be using
-; when the we have saved the guest state already or we haven't
-; been messing with the guest at all.
-;
-; @param eax Return code.
-; @uses eax, edx, ecx (or it may use them in the future)
-;
-ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsm
-    DEBUG_CHAR('%')
-
-%ifdef VBOX_WITH_STATISTICS
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
-    mov edx, 0ffffffffh
-    STAM32_PROFILE_ADV_STOP edx
-
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
-    mov edx, 0ffffffffh
-    STAM32_PROFILE_ADV_START edx
-
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
-    mov edx, 0ffffffffh
-    STAM32_PROFILE_ADV_START edx
-%endif
-
-    ;
-    ; Load the CPUM pointer.
-    ;
-    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
-    mov edx, 0ffffffffh
-
     pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
-    jmp short vmmGCGuestToHostAsm_EIPDone
-
-ALIGNCODE(16)
-vmmGCGuestToHostAsm_EIPDone:
+
     ; general registers which we care about.
     mov dword [edx + CPUMCPU.Hyper.ebx], ebx
…

     ; special registers which may change.
-vmmGCGuestToHostAsm_SkipHyperRegs:
 %ifdef STRICT_IF
     pushf
…
 at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
 at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
-at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
-at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
 ; disasm help
 at VMMSWITCHERDEF.offHCCode0, dd 0
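After this change the switcher keeps a single guest-to-host entry point: the caller does a plain near call with the status code in eax, and the routine pops the caller's return address straight into CPUMCPU.Hyper.eip rather than routing through the old EIPDone/SkipHyperRegs labels. A rough C model of the resulting convention is sketched below; every name in it is illustrative, and the real transition is of course done in the assembly shown above.

    /* Rough C model (not the real code) of why the *Ctx entry points could be
     * folded away: once the caller saves whatever context it cares about, one
     * entry point carrying only the status code is enough. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct CTXCORE { uint32_t eip, esp, eflags; } CTXCORE;

    typedef struct VMSKETCH
    {
        CTXCORE  HyperCtx;                         /* saved hypervisor state        */
        int32_t (*pfnGuestToHostAsm)(int32_t rc);  /* the one surviving entry point */
    } VMSKETCH;

    static int32_t guestToHost(int32_t rc)
    {
        /* The real routine switches stacks and address spaces; this stand-in
         * only forwards the status code to show the calling convention. */
        return rc;
    }

    /* Caller saves the context it wants preserved, then takes the common path;
     * this is roughly the pairing VMMGCGuestToHostAsmHyperCtx used to bundle. */
    static int32_t leaveHypervisor(VMSKETCH *pVM, const CTXCORE *pCtx, int32_t rc)
    {
        pVM->HyperCtx = *pCtx;
        return pVM->pfnGuestToHostAsm(rc);
    }

    int main(void)
    {
        VMSKETCH Vm  = { { 0, 0, 0 }, guestToHost };
        CTXCORE  Ctx = { 0x1000, 0x8000, 0x202 };
        /* Negative statuses model the VERR_* codes passed in eax. */
        printf("rc=%d\n", leaveHypervisor(&Vm, &Ctx, -42));
        return 0;
    }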
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r41907 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
…
 ENDPROC VMMGCGuestToHostAsm

-;;
-; VMMGCGuestToHostAsmHyperCtx
-;
-; This is an alternative entry point which we'll be using
-; when the we have the hypervisor context and need to save
-; that before going to the host.
-;
-; This is typically useful when abandoning the hypervisor
-; because of a trap and want the trap state to be saved.
-;
-; @param eax Return code.
-; @param ecx Points to CPUMCTXCORE.
-; @uses eax,edx,ecx
-ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsmHyperCtx
-    int3
-
-;;
-; VMMGCGuestToHostAsmGuestCtx
-;
-; Switches from Guest Context to Host Context.
-; Of course it's only called from within the GC.
-;
-; @param eax Return code.
-; @param esp + 4 Pointer to CPUMCTXCORE.
-;
-; @remark ASSUMES interrupts disabled.
-;
-ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsmGuestCtx
-    int3
-
 GLOBALNAME End
 ;
 ; The description string (in the text section).
…
 at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
 at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
-at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
-at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
 ; disasm help
 at VMMSWITCHERDEF.offHCCode0, dd 0
trunk/src/VBox/VMM/VMMSwitcher/PAETo32Bit.asm
r28800 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/PAEToAMD64.asm
r28800 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/PAEToPAE.asm
r28800 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
r41907 -> r41933:

 ;
-; Copyright (C) 2006-2007 Oracle Corporation
+; Copyright (C) 2006-2012 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
…

 ;;
-; VMMGCGuestToHostAsmGuestCtx
-;
-; Switches from Guest Context to Host Context.
-; Of course it's only called from within the GC.
-;
-; @param eax Return code.
-; @param esp + 4 Pointer to CPUMCTXCORE.
-;
-; @remark ASSUMES interrupts disabled.
-;
-ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsmGuestCtx
-    DEBUG_CHAR('~')
-
-%ifdef VBOX_WITH_STATISTICS
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
-    mov edx, 0ffffffffh
-    STAM_PROFILE_ADV_STOP edx
-
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
-    mov edx, 0ffffffffh
-    STAM_PROFILE_ADV_START edx
-
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
-    mov edx, 0ffffffffh
-    STAM_PROFILE_ADV_START edx
-%endif
-
-    ;
-    ; Load the CPUMCPU pointer.
-    ;
-    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
-    mov edx, 0ffffffffh
-
-    ; Skip return address (assumes called!)
-    lea esp, [esp + 4]
-
-    ;
-    ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
-    ;
-    ; general purpose registers.
-    push eax
-
-    mov eax, [esp + 4 + CPUMCTXCORE.eax]
-    mov [edx + CPUMCPU.Guest.eax], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.ecx]
-    mov [edx + CPUMCPU.Guest.ecx], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.edx]
-    mov [edx + CPUMCPU.Guest.edx], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.ebx]
-    mov [edx + CPUMCPU.Guest.ebx], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.esp]
-    mov [edx + CPUMCPU.Guest.esp], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.ebp]
-    mov [edx + CPUMCPU.Guest.ebp], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.esi]
-    mov [edx + CPUMCPU.Guest.esi], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.edi]
-    mov [edx + CPUMCPU.Guest.edi], eax
-    mov eax, dword [esp + 4 + CPUMCTXCORE.es.Sel]
-    mov dword [edx + CPUMCPU.Guest.es.Sel], eax
-    mov eax, dword [esp + 4 + CPUMCTXCORE.cs.Sel]
-    mov dword [edx + CPUMCPU.Guest.cs.Sel], eax
-    mov eax, dword [esp + 4 + CPUMCTXCORE.ss.Sel]
-    mov dword [edx + CPUMCPU.Guest.ss.Sel], eax
-    mov eax, dword [esp + 4 + CPUMCTXCORE.ds.Sel]
-    mov dword [edx + CPUMCPU.Guest.ds.Sel], eax
-    mov eax, dword [esp + 4 + CPUMCTXCORE.fs.Sel]
-    mov dword [edx + CPUMCPU.Guest.fs.Sel], eax
-    mov eax, dword [esp + 4 + CPUMCTXCORE.gs.Sel]
-    mov dword [edx + CPUMCPU.Guest.gs.Sel], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.eflags]
-    mov dword [edx + CPUMCPU.Guest.eflags], eax
-    mov eax, [esp + 4 + CPUMCTXCORE.eip]
-    mov dword [edx + CPUMCPU.Guest.eip], eax
-    pop eax
-
-    add esp, CPUMCTXCORE_size ; skip CPUMCTXCORE structure
-
-    jmp vmmGCGuestToHostAsm_EIPDone
-ENDPROC VMMGCGuestToHostAsmGuestCtx
-
-
-;;
-; VMMGCGuestToHostAsmHyperCtx
-;
-; This is an alternative entry point which we'll be using
-; when the we have the hypervisor context and need to save
-; that before going to the host.
-;
-; This is typically useful when abandoning the hypervisor
-; because of a trap and want the trap state to be saved.
-;
-; @param eax Return code.
-; @param ecx Points to CPUMCTXCORE.
-; @uses eax,edx,ecx
-ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsmHyperCtx
-    DEBUG_CHAR('#')
-
-%ifdef VBOX_WITH_STATISTICS
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
-    mov edx, 0ffffffffh
-    STAM_PROFILE_ADV_STOP edx
-
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
-    mov edx, 0ffffffffh
-    STAM_PROFILE_ADV_START edx
-
-    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
-    mov edx, 0ffffffffh
-    STAM_PROFILE_ADV_START edx
-%endif
-
-    ;
-    ; Load the CPUM pointer.
-    ;
-    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
-    mov edx, 0ffffffffh
-
-    push eax ; save return code.
-    ; general purpose registers
-    mov eax, [ecx + CPUMCTXCORE.edi]
-    mov [edx + CPUMCPU.Hyper.edi], eax
-    mov eax, [ecx + CPUMCTXCORE.esi]
-    mov [edx + CPUMCPU.Hyper.esi], eax
-    mov eax, [ecx + CPUMCTXCORE.ebp]
-    mov [edx + CPUMCPU.Hyper.ebp], eax
-    mov eax, [ecx + CPUMCTXCORE.eax]
-    mov [edx + CPUMCPU.Hyper.eax], eax
-    mov eax, [ecx + CPUMCTXCORE.ebx]
-    mov [edx + CPUMCPU.Hyper.ebx], eax
-    mov eax, [ecx + CPUMCTXCORE.edx]
-    mov [edx + CPUMCPU.Hyper.edx], eax
-    mov eax, [ecx + CPUMCTXCORE.ecx]
-    mov [edx + CPUMCPU.Hyper.ecx], eax
-    mov eax, [ecx + CPUMCTXCORE.esp]
-    mov [edx + CPUMCPU.Hyper.esp], eax
-    ; selectors
-    mov eax, [ecx + CPUMCTXCORE.ss.Sel]
-    mov [edx + CPUMCPU.Hyper.ss.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.gs.Sel]
-    mov [edx + CPUMCPU.Hyper.gs.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.fs.Sel]
-    mov [edx + CPUMCPU.Hyper.fs.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.es.Sel]
-    mov [edx + CPUMCPU.Hyper.es.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.ds.Sel]
-    mov [edx + CPUMCPU.Hyper.ds.Sel], eax
-    mov eax, [ecx + CPUMCTXCORE.cs.Sel]
-    mov [edx + CPUMCPU.Hyper.cs.Sel], eax
-    ; flags
-    mov eax, [ecx + CPUMCTXCORE.eflags]
-    mov [edx + CPUMCPU.Hyper.eflags], eax
-    ; eip
-    mov eax, [ecx + CPUMCTXCORE.eip]
-    mov [edx + CPUMCPU.Hyper.eip], eax
-    ; jump to common worker code.
-    pop eax ; restore return code.
-    jmp vmmGCGuestToHostAsm_SkipHyperRegs
-
-ENDPROC VMMGCGuestToHostAsmHyperCtx
-
-
-;;
 ; VMMGCGuestToHostAsm
 ;
…

     pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
-    jmp short vmmGCGuestToHostAsm_EIPDone
-
-ALIGNCODE(16)
-vmmGCGuestToHostAsm_EIPDone:
+
     ; general registers which we care about.
     mov dword [edx + CPUMCPU.Hyper.ebx], ebx
…

     ; special registers which may change.
-vmmGCGuestToHostAsm_SkipHyperRegs:
     ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
     sldt [edx + CPUMCPU.Hyper.ldtr.Sel]
…
 at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
 at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
-at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
-at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
 ; disasm help
 at VMMSWITCHERDEF.offHCCode0, dd 0
trunk/src/VBox/VMM/include/VMMSwitcher.h
r41800 -> r41933:

     /** vmmGCGuestToHostAsm assembly entrypoint. */
     uint32_t offGCGuestToHostAsm;
-    /** vmmGCGuestToHostAsmHyperCtx assembly entrypoint taking HyperCtx. */
-    uint32_t offGCGuestToHostAsmHyperCtx;
-    /** vmmGCGuestToHostAsmGuestCtx assembly entrypoint taking GuestCtx. */
-    uint32_t offGCGuestToHostAsmGuestCtx;
     /** @name Disassembly Regions.
      * @{ */
trunk/src/VBox/VMM/include/VMMSwitcher.mac
r35333 -> r41933:

     .offGCCallTrampoline:   resd 1
     .offGCGuestToHostAsm:   resd 1
-    .offGCGuestToHostAsmHyperCtx: resd 1
-    .offGCGuestToHostAsmGuestCtx: resd 1
     ; disasm help
     .offHCCode0:            resd 1
trunk/src/VBox/VMM/testcase/tstMicroRCA.asm
r41906 -> r41933:

 extern NAME(idtOnly42)
 extern IMPNAME(g_VM)
+extern IMPNAME(VMMGCGuestToHostAsm)

 BEGINCODE
…
     cld

+%if 0 ; this has been broken for quite some time
     ;
     ; Setup CPUMCTXCORE frame
…
     push edi ;3c ; 0h
     ;40
-
-    test byte [esp + CPUMCTXCORE.cs.Sel], 3h ; check CPL of the cs selector
+%endif
+
+    test byte [esp + 0ch + 4h], 3h ; check CPL of the cs selector
     jmp short tstTrapHandler_Fault_Hyper ;; @todo
     jz short tstTrapHandler_Fault_Hyper
…
     mov edx, IMP(g_VM)
     mov eax, VERR_TRPM_DONT_PANIC
-    call [edx + VM.pfnVMMGCGuestToHostAsmGuestCtx]
+    call [edx + VM.pfnVMMGCGuestToHostAsm]
     jmp short tstTrapHandler_Fault_Guest
…
     mov edx, IMP(g_VM)
     mov eax, VERR_TRPM_DONT_PANIC
-    call [edx + VM.pfnVMMGCGuestToHostAsmHyperCtx]
+    call [edx + VM.pfnVMMGCGuestToHostAsm]
     jmp short tstTrapHandler_Fault_Hyper
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r41931 -> r41933:

     GEN_CHECK_OFF(VM, cbSelf);
     GEN_CHECK_OFF(VM, offVMCPU);
-    GEN_CHECK_OFF(VM, pfnVMMGCGuestToHostAsmGuestCtx);
-    GEN_CHECK_OFF(VM, pfnVMMGCGuestToHostAsmHyperCtx);
     GEN_CHECK_OFF(VM, pfnVMMGCGuestToHostAsm);
     GEN_CHECK_OFF(VM, fRecompileUser);
…
     GEN_CHECK_OFF(VM, fUseLargePages);
     GEN_CHECK_OFF(VM, hTraceBufRC);
-    GEN_CHECK_OFF(VM, uPadding3);
     GEN_CHECK_OFF(VM, hTraceBufR3);
     GEN_CHECK_OFF(VM, hTraceBufR0);
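tstVMStruct.h is a generated list of offset checks that keeps the C structures and their assembly mirrors (vm.mac above) in step, so removing a field also means removing its GEN_CHECK_OFF line. The stand-in below sketches the offsetof idea only; the real GEN_CHECK_OFF macro is defined elsewhere in the testcase framework, and the structure here is a toy.

    /* Toy illustration of offset checking, not the real GEN_CHECK_OFF. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct VMSKETCH
    {
        uint32_t cbSelf;
        uint32_t offVMCPU;
        uint32_t pfnVMMGCGuestToHostAsm;   /* raw-mode pointer, 32-bit */
    } VMSKETCH;

    #define GEN_CHECK_OFF(type, member) \
        printf("%s.%s at offset %zu\n", #type, #member, offsetof(type, member))

    int main(void)
    {
        /* Fields dropped by the changeset simply vanish from this list, the
         * same way the GuestCtx/HyperCtx lines were removed above. */
        GEN_CHECK_OFF(VMSKETCH, cbSelf);
        GEN_CHECK_OFF(VMSKETCH, offVMCPU);
        GEN_CHECK_OFF(VMSKETCH, pfnVMMGCGuestToHostAsm);
        return 0;
    }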