Changeset 87412 in vbox
- Timestamp: Jan 25, 2021 10:58:45 AM (4 years ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r87411 r87412 569 569 ; @returns eax 570 570 ; 571 ; @param pVM msc:rcx 572 ; @param pVCpu msc:rdx The cross context virtual CPU structure of the calling EMT. 573 ; @param fResumeVM msc:r8l 574 ; @param pfnStartVM msc:r9 575 ; 576 ; Old: 571 577 ; @param fResumeVM msc:rcx 572 578 ; @param pCtx msc:rdx … … 594 600 595 601 ; Spill input parameters. 596 mov [xBP + 010h], rcx ; fResumeVM597 mov [xBP + 018h], rdx ; p Ctx598 mov [xBP + 020h], r8 ; pvUnused599 mov [xBP + 028h], r9 ; p VM602 mov [xBP + 010h], rcx ; pVM 603 mov [xBP + 018h], rdx ; pVCpu 604 mov [xBP + 020h], r8 ; fResumeVM 605 mov [xBP + 028h], r9 ; pfnStartVM 600 606 601 607 ; Ask CPUM whether we've started using the FPU yet. 602 mov rcx, [xBP + 30h]; pVCpu608 mov rcx, [xBP + 018h] ; pVCpu 603 609 call NAME(CPUMIsGuestFPUStateActive) 604 610 test al, al … … 606 612 607 613 ; No need to mess with XMM registers just call the start routine and return. 608 mov r11, [xBP + 38h] ; pfnStartVM 609 mov r10, [xBP + 30h] ; pVCpu 610 mov [xSP + 020h], r10 611 mov rcx, [xBP + 010h] ; fResumeVM 612 mov rdx, [xBP + 018h] ; pCtx 613 mov r8, [xBP + 020h] ; pvUnused 614 mov r9, [xBP + 028h] ; pVM 615 call r11 614 mov r9, [xBP + 028h] ; pfnStartVM 615 mov rcx, [xBP + 010h] ; pVM 616 mov rdx, [xBP + 018h] ; pVCpu 617 mov r8, [xBP + 020h] ; fResume 618 call r9 616 619 617 620 leave … … 633 636 stmxcsr [rsp + 040h + 0a0h] 634 637 635 mov r10, [xBP + 018h] ; p Ctx636 mov eax, [r10 + CPUMCTX.fXStateMask]638 mov r10, [xBP + 018h] ; pVCpu 639 mov eax, [r10 + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask] 637 640 test eax, eax 638 641 jz .guest_fpu_state_manually … … 643 646 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS 644 647 xor edx, edx 645 mov r10, [r10 + CPUMCTX.pXStateR0]648 mov r10, [r10 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0] 646 649 xrstor [r10] 647 650 648 651 ; Make the call (same as in the other case). 
649 mov r11, [xBP + 38h] ; pfnStartVM 650 mov r10, [xBP + 30h] ; pVCpu 651 mov [xSP + 020h], r10 652 mov rcx, [xBP + 010h] ; fResumeVM 653 mov rdx, [xBP + 018h] ; pCtx 654 mov r8, [xBP + 020h] ; pvUnused 655 mov r9, [xBP + 028h] ; pVM 656 call r11 652 mov r9, [xBP + 028h] ; pfnStartVM 653 mov rcx, [xBP + 010h] ; pVM 654 mov rdx, [xBP + 018h] ; pVCpu 655 mov r8, [xBP + 020h] ; fResume 656 call r9 657 657 658 658 mov r11d, eax ; save return value (xsave below uses eax) 659 659 660 660 ; Save the guest XMM registers. 661 mov r10, [xBP + 018h] ; p Ctx662 mov eax, [r10 + CPUMCTX.fXStateMask]661 mov r10, [xBP + 018h] ; pVCpu 662 mov eax, [r10 + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask] 663 663 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS 664 664 xor edx, edx 665 mov r10, [r10 + CPUMCTX.pXStateR0]665 mov r10, [r10 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0] 666 666 xsave [r10] 667 667 … … 689 689 .guest_fpu_state_manually: 690 690 ; Load the full guest XMM register state. 691 mov r10, [r10 + CPUMCTX.pXStateR0]691 mov r10, [r10 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0] 692 692 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h] 693 693 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h] … … 709 709 710 710 ; Make the call (same as in the other case). 711 mov r11, [xBP + 38h] ; pfnStartVM 712 mov r10, [xBP + 30h] ; pVCpu 713 mov [xSP + 020h], r10 714 mov rcx, [xBP + 010h] ; fResumeVM 715 mov rdx, [xBP + 018h] ; pCtx 716 mov r8, [xBP + 020h] ; pvUnused 717 mov r9, [xBP + 028h] ; pVM 718 call r11 711 mov r9, [xBP + 028h] ; pfnStartVM 712 mov rcx, [xBP + 010h] ; pVM 713 mov rdx, [xBP + 018h] ; pVCpu 714 mov r8, [xBP + 020h] ; fResume 715 call r9 719 716 720 717 ; Save the guest XMM registers. 
721 mov r10, [xBP + 018h] ; p Ctx722 mov r10, [r10 + CPUMCTX.pXStateR0]718 mov r10, [xBP + 018h] ; pVCpu 719 mov r10, [r10 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0] 723 720 stmxcsr [r10 + X86FXSTATE.MXCSR] 724 721 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0 … … 758 755 %endif 759 756 757 ; Save the guest state. 760 758 push xDI 761 759 %ifndef VMX_SKIP_TR … … 846 844 ; 847 845 ; @returns VBox status code 848 ; @param fResume msc:rcx, gcc:rdi Whether to use vmlauch/vmresume. 849 ; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context. 850 ; @param pvUnused msc:r8, gcc:rdx Unused argument. 851 ; @param pVM msc:r9, gcc:rcx The cross context VM structure. 852 ; @param pVCpu msc:[ebp+30], gcc:r8 The cross context virtual CPU structure of the calling EMT. 853 ; 854 ALIGNCODE(16) 855 BEGINPROC VMXR0StartVM64 846 ; @param pVM msc:rcx, gcc:rdi The cross context VM structure. (unused) 847 ; @param pVCpu msc:rdx, gcc:rsi The cross context virtual CPU structure of the calling EMT. 848 ; @param fResume msc:r8l, gcc:dl Whether to use vmlauch/vmresume. 849 ; 850 ALIGNCODE(64) 851 BEGINPROC hmR0VMXStartVM 856 852 push xBP 857 853 mov xBP, xSP … … 861 857 862 858 ; Save all general purpose host registers. 863 %assign cbFrame 0859 %assign cbFrame 8 864 860 PUSH_CALLEE_PRESERVED_REGISTERS 865 861 SEH64_END_PROLOGUE … … 872 868 873 869 ; 874 ; Unify the input parameter registers .870 ; Unify the input parameter registers: rsi=pVCpu, bl=fResume, rdi=&pVCpu->cpum.GstCtx; 875 871 ; 876 872 %ifdef ASM_CALL64_GCC 877 ; fResume already in rdi 878 ; pCtx already in rsi 879 mov rbx, rdx ; pvUnused 873 mov ebx, edx ; fResume 880 874 %else 881 mov rdi, rcx ; fResume 882 mov rsi, rdx ; pCtx 883 mov rbx, r8 ; pvUnused 875 mov rsi, rdx ; pVCpu 876 mov ebx, r8d ; fResume 884 877 %endif 878 lea rdi, [rsi + VMCPU.cpum.GstCtx] 885 879 886 880 ; … … 888 882 ; Note! Trashes rdx and rcx. 
889 883 ; 890 %ifdef ASM_CALL64_MSC 891 mov rax, [xBP + 30h] ; pVCpu 892 %else 893 mov rax, r8 ; pVCpu 894 %endif 895 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1 884 test byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1 896 885 jz .xcr0_before_skip 897 886 … … 901 890 push xAX 902 891 903 mov eax, [ xSI+ CPUMCTX.aXcr] ; load the guest one904 mov edx, [ xSI+ CPUMCTX.aXcr + 4]892 mov eax, [rdi + CPUMCTX.aXcr] ; load the guest one 893 mov edx, [rdi + CPUMCTX.aXcr + 4] 905 894 xor ecx, ecx ; paranoia 906 895 xsetbv … … 920 909 921 910 ; Save the pCtx pointer. 922 push xSI911 push rdi 923 912 924 913 ; Save host LDTR. … … 944 933 945 934 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction). 946 mov r bx, qword [xSI+ CPUMCTX.cr2]935 mov rcx, qword [rdi + CPUMCTX.cr2] 947 936 mov rdx, cr2 948 cmp r bx, rdx937 cmp rcx, rdx 949 938 je .skip_cr2_write 950 mov cr2, r bx939 mov cr2, rcx 951 940 952 941 .skip_cr2_write: … … 957 946 958 947 ; Fight spectre and similar. 959 INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY 948 INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER rdi, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY 949 950 ; Resume or start VM? 951 cmp bl, 0 ; fResume 960 952 961 953 ; Load guest general purpose registers. 962 mov rax, qword [xSI + CPUMCTX.eax] 963 mov rbx, qword [xSI + CPUMCTX.ebx] 964 mov rcx, qword [xSI + CPUMCTX.ecx] 965 mov rdx, qword [xSI + CPUMCTX.edx] 966 mov rbp, qword [xSI + CPUMCTX.ebp] 967 mov r8, qword [xSI + CPUMCTX.r8] 968 mov r9, qword [xSI + CPUMCTX.r9] 969 mov r10, qword [xSI + CPUMCTX.r10] 970 mov r11, qword [xSI + CPUMCTX.r11] 971 mov r12, qword [xSI + CPUMCTX.r12] 972 mov r13, qword [xSI + CPUMCTX.r13] 973 mov r14, qword [xSI + CPUMCTX.r14] 974 mov r15, qword [xSI + CPUMCTX.r15] 975 976 ; Resume or start VM? 977 cmp xDI, 0 ; fResume 978 979 ; Load guest rdi & rsi. 
980 mov rdi, qword [xSI + CPUMCTX.edi] 981 mov rsi, qword [xSI + CPUMCTX.esi] 954 mov rax, qword [rdi + CPUMCTX.eax] 955 mov rbx, qword [rdi + CPUMCTX.ebx] 956 mov rcx, qword [rdi + CPUMCTX.ecx] 957 mov rdx, qword [rdi + CPUMCTX.edx] 958 mov rbp, qword [rdi + CPUMCTX.ebp] 959 mov rsi, qword [rdi + CPUMCTX.esi] 960 mov r8, qword [rdi + CPUMCTX.r8] 961 mov r9, qword [rdi + CPUMCTX.r9] 962 mov r10, qword [rdi + CPUMCTX.r10] 963 mov r11, qword [rdi + CPUMCTX.r11] 964 mov r12, qword [rdi + CPUMCTX.r12] 965 mov r13, qword [rdi + CPUMCTX.r13] 966 mov r14, qword [rdi + CPUMCTX.r14] 967 mov r15, qword [rdi + CPUMCTX.r15] 968 mov rdi, qword [rdi + CPUMCTX.edi] 982 969 983 970 je .vmlaunch64_launch … … 986 973 jc near .vmxstart64_invalid_vmcs_ptr 987 974 jz near .vmxstart64_start_failed 988 jmp .vmlaunch64_done ;; here if vmresume detected a failure975 jmp .vmlaunch64_done ; here if vmresume detected a failure 989 976 990 977 .vmlaunch64_launch: … … 992 979 jc near .vmxstart64_invalid_vmcs_ptr 993 980 jz near .vmxstart64_start_failed 994 jmp .vmlaunch64_done ;; here if vmlaunch detected a failure995 996 ALIGNCODE( 16)981 jmp .vmlaunch64_done ; here if vmlaunch detected a failure 982 983 ALIGNCODE(64) 997 984 .vmlaunch64_done: 998 985 RESTORE_STATE_VM64 … … 1013 1000 mov eax, VERR_VMX_UNABLE_TO_START_VM 1014 1001 jmp .vmstart64_end 1015 ENDPROC VMXR0StartVM641002 ENDPROC hmR0VMXStartVM 1016 1003 1017 1004 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r87408 r87412 4162 4162 * Currently we have just a single handler for all guest modes as well, see @bugref{6208#c73}. 4163 4163 */ 4164 pVmcsInfo->pfnStartVM = VMXR0StartVM64;4164 pVmcsInfo->pfnStartVM = hmR0VMXStartVM; 4165 4165 if (!fIsNstGstVmcs) 4166 4166 { … … 6784 6784 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 6785 6785 #ifdef VBOX_WITH_KERNEL_USING_XMM 6786 int rc = hmR0VMXStartVMWrapXMM( fResumeVM, pCtx, NULL /*pvUnused*/, pVM, pVCpu, pVmcsInfo->pfnStartVM);6786 int rc = hmR0VMXStartVMWrapXMM(pVM, pVCpu, fResumeVM, pVmcsInfo->pfnStartVM); 6787 6787 #else 6788 int rc = pVmcsInfo->pfnStartVM( fResumeVM, pCtx, NULL /*pvUnused*/, pVM, pVCpu);6788 int rc = pVmcsInfo->pfnStartVM(pVM, pVCpu, fResumeVM); 6789 6789 #endif 6790 6790 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc)); -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r83057 r87412 46 46 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat); 47 47 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPUCC pVCpu); 48 DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVMCC pVM, PVMCPUCC pVCpu);49 48 #endif /* IN_RING0 */ 50 49 -
trunk/src/VBox/VMM/include/HMInternal.h
r87408 r87412 702 702 * 703 703 * @returns VBox status code (no informational stuff). 704 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).705 * @param pCtx The CPU register context.706 * @param pvUnused Unused argument.707 704 * @param pVM Pointer to the cross context VM structure. 708 705 * @param pVCpu Pointer to the cross context per-CPU structure. 709 */ 710 typedef DECLCALLBACKTYPE(int, FNHMVMXSTARTVM,(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVMCC pVM, PVMCPUCC pVCpu)); 706 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false). 707 */ 708 typedef DECLCALLBACKTYPE(int, FNHMVMXSTARTVM,(PVMCC pVM, PVMCPUCC pVCpu, RTHCUINT fResume)); 711 709 /** Pointer to a VMX StartVM function. */ 712 710 typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM; … … 1360 1358 1361 1359 # ifdef VBOX_WITH_KERNEL_USING_XMM 1362 DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVMCC pVM, PVMCPUCC pVCpu, 1363 PFNHMVMXSTARTVM pfnStartVM); 1360 DECLASM(int) hmR0VMXStartVMWrapXMM(PVMCC pVM, PVMCPUCC pVCpu, RTHCUINT fResume, PFNHMVMXSTARTVM pfnStartVM); 1364 1361 # endif 1362 DECLASM(int) hmR0VMXStartVM(PVMCC pVM, PVMCPUCC pVCpu, RTHCUINT fResume); 1363 1365 1364 /** @} */ 1366 1365
Note: See TracChangeset for help on using the changeset viewer.