Changeset 87428 in vbox
Timestamp: Jan 26, 2021, 10:59:27 AM (4 years ago)
svn:sync-xref-src-repo-rev: 142434
File: 1 edited
Legend: lines shown without a prefix are unmodified; lines prefixed with "+" were added; lines prefixed with "-" were removed. Elided regions are marked with "…".
trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87417 → r87428)

 ; Determine skipping restoring of GDTR, IDTR, TR across VMX non-root operation.
 ;
+; @note This is normally done by hmR0VmxExportHostSegmentRegs and VMXRestoreHostState,
+;       so much of this is untested code.
+; @{
 %define VMX_SKIP_GDTR
 %define VMX_SKIP_TR
…
 ; risk loading a stale LDT value or something invalid.
 %define HM_64_BIT_USE_NULL_SEL
-; Darwin (Mavericks) uses IDTR limit to store the CPU Id so we need to restore it always.
+; Darwin (Mavericks) uses IDTR limit to store the CPU number so we need to always restore it.
 ; See @bugref{6875}.
+ %undef VMX_SKIP_IDTR
 %else
 %define VMX_SKIP_IDTR
 %endif
 ;; @}

 ;; @def CALLEE_PRESERVED_REGISTER_COUNT
…
 %endmacro

+
 ;; @def PUSH_RELEVANT_SEGMENT_REGISTERS
 ; Macro saving all segment registers on the stack.
 ; @param 1  Full width register name.
 ; @param 2  16-bit register name for \a 1.
-
-;; @def POP_RELEVANT_SEGMENT_REGISTERS
-; Macro restoring all segment registers on the stack.
-; @param 1  Full width register name.
-; @param 2  16-bit register name for \a 1.
-%ifdef VBOX_SKIP_RESTORE_SEG
-%macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
-%endmacro
-
-%macro POP_RELEVANT_SEGMENT_REGISTERS 2
-%endmacro
-%else ; !VBOX_SKIP_RESTORE_SEG
-; Trashes, rax, rdx & rcx.
-%macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
+; @cobbers rax, rdx, rcx
+%macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
+%ifndef VBOX_SKIP_RESTORE_SEG
+ %error untested code. probably does not work any more!
 %ifndef HM_64_BIT_USE_NULL_SEL
        mov     %2, es
…
        push    gs
 %endif
-%endmacro
-
-; trashes, rax, rdx & rcx
-%macro POP_RELEVANT_SEGMENT_REGISTERS 2
+%endif ; !VBOX_SKIP_RESTORE_SEG
+%endmacro ; PUSH_RELEVANT_SEGMENT_REGISTERS
+
+;; @def POP_RELEVANT_SEGMENT_REGISTERS
+; Macro restoring all segment registers on the stack.
+; @param 1  Full width register name.
+; @param 2  16-bit register name for \a 1.
+; @cobbers rax, rdx, rcx
+%macro POP_RELEVANT_SEGMENT_REGISTERS 2
+%ifndef VBOX_SKIP_RESTORE_SEG
+ %error untested code. probably does not work any more!
        ; Note: do not step through this code with a debugger!
 %ifndef HM_64_BIT_USE_NULL_SEL
…
        mov     es, %2
 %endif
-%endmacro
-%endif ; VBOX_SKIP_RESTORE_SEG
+%endif ; !VBOX_SKIP_RESTORE_SEG
+%endmacro ; POP_RELEVANT_SEGMENT_REGISTERS

…

-;; @def RESTORE_STATE_VM64
-; Macro restoring essential host state and updating guest state
-; for 64-bit host, 64-bit guest for VT-x.
-;
-%macro RESTORE_STATE_VM64 0
-        ; Restore base and limit of the IDTR & GDTR.
-%ifndef VMX_SKIP_IDTR
-        lidt    [xSP]
-        add     xSP, xCB * 2
-%endif
-%ifndef VMX_SKIP_GDTR
-        lgdt    [xSP]
-        add     xSP, xCB * 2
-%endif
-
-        ; Save the guest state.
-        push    xDI
-%ifndef VMX_SKIP_TR
-        mov     xDI, [xSP + xCB * 3]            ; pCtx (*3 to skip the saved xDI, TR, LDTR)
-%else
-        mov     xDI, [xSP + xCB * 2]            ; pCtx (*2 to skip the saved xDI, LDTR)
-%endif
-
-        mov     qword [xDI + CPUMCTX.eax], rax
-        mov     rax, SPECTRE_FILLER
-        mov     qword [xDI + CPUMCTX.ebx], rbx
-        mov     rbx, rax
-        mov     qword [xDI + CPUMCTX.ecx], rcx
-        mov     rcx, rax
-        mov     qword [xDI + CPUMCTX.edx], rdx
-        mov     rdx, rax
-        mov     qword [xDI + CPUMCTX.esi], rsi
-        mov     rsi, rax
-        mov     qword [xDI + CPUMCTX.ebp], rbp
-        mov     rbp, rax
-        mov     qword [xDI + CPUMCTX.r8], r8
-        mov     r8, rax
-        mov     qword [xDI + CPUMCTX.r9], r9
-        mov     r9, rax
-        mov     qword [xDI + CPUMCTX.r10], r10
-        mov     r10, rax
-        mov     qword [xDI + CPUMCTX.r11], r11
-        mov     r11, rax
-        mov     qword [xDI + CPUMCTX.r12], r12
-        mov     r12, rax
-        mov     qword [xDI + CPUMCTX.r13], r13
-        mov     r13, rax
-        mov     qword [xDI + CPUMCTX.r14], r14
-        mov     r14, rax
-        mov     qword [xDI + CPUMCTX.r15], r15
-        mov     r15, rax
-        mov     rax, cr2
-        mov     qword [xDI + CPUMCTX.cr2], rax
-
-        pop     xAX                             ; The guest rdi we pushed above
-        mov     qword [xDI + CPUMCTX.edi], rax
-
-        ; Fight spectre.
-        INDIRECT_BRANCH_PREDICTION_BARRIER_CTX xDI, CPUMCTX_WSF_IBPB_EXIT
-
-%ifndef VMX_SKIP_TR
-        ; Restore TSS selector; must mark it as not busy before using ltr!
-        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
-        ; @todo get rid of sgdt
-        pop     xBX                             ; Saved TR
-        sub     xSP, xCB * 2
-        sgdt    [xSP]
-        mov     xAX, xBX
-        and     eax, X86_SEL_MASK_OFF_RPL       ; mask away TI and RPL bits leaving only the descriptor offset
-        add     xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset
-        and     dword [xAX + 4], ~RT_BIT(9)     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
-        ltr     bx
-        add     xSP, xCB * 2
-%endif
-
-        pop     xAX                             ; Saved LDTR
-        cmp     eax, 0
-        je      %%skip_ldt_write64
-        lldt    ax
-
-%%skip_ldt_write64:
-        pop     xSI                             ; pCtx (needed in rsi by the macros below)
-
-        ; Restore segment registers.
-        POP_RELEVANT_SEGMENT_REGISTERS xAX, ax
-
-        ; Restore the host XCR0 if necessary.
-        pop     xCX
-        test    ecx, ecx
-        jnz     %%xcr0_after_skip
-        pop     xAX
-        pop     xDX
-        xsetbv                                  ; ecx is already zero.
-%%xcr0_after_skip:
-
-        ; Restore general purpose registers.
-        POP_CALLEE_PRESERVED_REGISTERS
-%endmacro
-
-
 ;;
 ; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
…
        push    xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
        pushf
        cli

+%define frm_fRFlags         -008h
+%define frm_pGstCtx         -010h               ; Where we stash guest CPU context for use after the vmrun.
+%define frm_uHostXcr0       -020h               ; 128-bit
+%define frm_saved_gdtr      -036h               ; 16+64:  Only used when VMX_SKIP_GDTR isn't defined
+%define frm_saved_tr        -034h               ; 16-bit: Only used when VMX_SKIP_TR isn't defined
+%define frm_fNoRestoreXcr0  -030h               ; 32-bit: Non-zero if we should skip XCR0 restoring.
+%define frm_saved_idtr      -046h               ; 16+64:  Only used when VMX_SKIP_IDTR isn't defined
+%define frm_saved_ldtr      -044h               ; 16-bit: always saved.
+%define frm_rcError         -040h               ; 32-bit: Error status code (not used in the success path)
+%define frm_guest_rax       -048h               ; Temporary storage slot for guest RAX.
+%assign cbFrame             050h
+        sub     rsp, cbFrame - 8
+
        ; Save all general purpose host registers.
-%assign cbFrame 8
        PUSH_CALLEE_PRESERVED_REGISTERS
+        ;PUSH_RELEVANT_SEGMENT_REGISTERS xAX, ax - currently broken
        SEH64_END_PROLOGUE

-        ; First we have to save some final CPU context registers.
-        lea     r10, [.vmlaunch64_done wrt rip]
-        mov     rax, VMX_VMCS_HOST_RIP          ; return address (too difficult to continue after VMLAUNCH?)
-        vmwrite rax, r10
-        ; Note: ASSUMES success!
-
        ;
        ; Unify the input parameter registers: rsi=pVCpu, bl=fResume, rdi=&pVCpu->cpum.GstCtx;
        ;
-%ifdef ASM_CALL64_GCC
+ %ifdef ASM_CALL64_GCC
        mov     ebx, edx                        ; fResume
-%else
+ %else
        mov     rsi, rdx                        ; pVCpu
        mov     ebx, r8d                        ; fResume
-%endif
+ %endif
        lea     rdi, [rsi + VMCPU.cpum.GstCtx]
+        mov     [rbp + frm_pGstCtx], rdi

        ;
        ; Save the host XCR0 and load the guest one if necessary.
-        ; Note! Trashes rdx and rcx.
-        ;
+        ; Note! Trashes rax, rdx and rcx.
+        ;
+        mov     ecx, 3fh                        ; indicate that we need not restore XCR0
        test    byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
-        jz      .xcr0_before_skip
+        jz      .xcr0_before_done

        xor     ecx, ecx
        xgetbv                                  ; save the host one on the stack
-        push    xDX
-        push    xAX
+        mov     [rbp + frm_uHostXcr0], eax
+        mov     [rbp + frm_uHostXcr0 + 4], edx

        mov     eax, [rdi + CPUMCTX.aXcr]       ; load the guest one
        mov     edx, [rdi + CPUMCTX.aXcr + 4]
-        xor     ecx, ecx                        ; paranoia
+        xor     ecx, ecx                        ; paranoia; indicate that we must restore XCR0 (popped into ecx, thus 0)
        xsetbv
-
-        push    0                               ; indicate that we must restore XCR0 (popped into ecx, thus 0)
-        jmp     .xcr0_before_done
-
-.xcr0_before_skip:
-        push    3fh                             ; indicate that we need not
 .xcr0_before_done:
-
-        ;
-        ; Save segment registers.
-        ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
-        ;
-        PUSH_RELEVANT_SEGMENT_REGISTERS xAX, ax
-
-        ; Save the pCtx pointer.
-        push    rdi
+        mov     [rbp + frm_fNoRestoreXcr0], ecx ; only 32-bit!

        ; Save host LDTR.
-        xor     eax, eax
-        sldt    ax
-        push    xAX
-
-%ifndef VMX_SKIP_TR
+        sldt    [rbp + frm_saved_ldtr]
+
+ %ifndef VMX_SKIP_TR
        ; The host TR limit is reset to 0x67; save & restore it manually.
-        str     eax
-        push    xAX
-%endif
-
-%ifndef VMX_SKIP_GDTR
+        str     word [rbp + frm_saved_tr]
+ %endif
+
+ %ifndef VMX_SKIP_GDTR
        ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
-        sub     xSP, xCB * 2
-        sgdt    [xSP]
-%endif
-%ifndef VMX_SKIP_IDTR
-        sub     xSP, xCB * 2
-        sidt    [xSP]
-%endif
+        sgdt    [rbp + frm_saved_gdtr]
+ %endif
+ %ifndef VMX_SKIP_IDTR
+        sidt    [rbp + frm_saved_idtr]
+ %endif

        ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
…

 .skip_cr2_write:
-        mov     eax, VMX_VMCS_HOST_RSP
-        vmwrite xAX, xSP
-        ; Note: ASSUMES success!
-        ; Don't mess with ESP anymore!!!
-
-        ; Fight spectre and similar.
+        ; Set the vmlaunch/vmresume "return" host RIP and RSP values.
+        lea     rcx, [hmR0VMXStartVMHostRIP wrt rip]  ;; @todo It is only strictly necessary to write VMX_VMCS_HOST_RIP when
+        mov     eax, VMX_VMCS_HOST_RIP                ;;       the VMXVMCSINFO::pfnStartVM function changes (eventually
+        vmwrite rax, rcx                              ;;       take the Windows/SSE stuff into account then)...
+%ifdef VBOX_STRICT
+        jna     hmR0VMXStartVMHostRIP.vmwrite_failed
+%endif
+        mov     edx, VMX_VMCS_HOST_RSP                ;; @todo The HOST_RSP value is unlikely to change much, so if vmwrite
+        vmwrite rdx, rsp                              ;;       can be noticably more expensive than a memory read, we could
+%ifdef VBOX_STRICT                                    ;;       easily optimize this one away almost completely by comparing
+        jna     hmR0VMXStartVMHostRIP.vmwrite_failed  ;;       rsp with a shadow copy of VMX_VMCS_HOST_RSP.
+%endif
+
+        ; Fight spectre and similar. Trashes rax, rcx, and rdx.
        INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER rdi, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY

…

        vmresume
-        jc      near .vmxstart64_invalid_vmcs_ptr
-        jz      near .vmxstart64_start_failed
-        jmp     .vmlaunch64_done                ; here if vmresume detected a failure
+        jc      hmR0VMXStartVMHostRIP.vmxstart64_invalid_vmcs_ptr
+        jz      hmR0VMXStartVMHostRIP.vmxstart64_start_failed
+        jmp     hmR0VMXStartVMHostRIP           ; here if vmresume detected a failure

 .vmlaunch64_launch:
        vmlaunch
-        jc      near .vmxstart64_invalid_vmcs_ptr
-        jz      near .vmxstart64_start_failed
-        jmp     .vmlaunch64_done                ; here if vmlaunch detected a failure
+        jc      hmR0VMXStartVMHostRIP.vmxstart64_invalid_vmcs_ptr
+        jz      hmR0VMXStartVMHostRIP.vmxstart64_start_failed
+        jmp     hmR0VMXStartVMHostRIP           ; here if vmlaunch detected a failure

 ALIGNCODE(64)
-.vmlaunch64_done:
-        RESTORE_STATE_VM64
+GLOBALNAME hmR0VMXStartVMHostRIP
+
+;;
+; Common restore logic for success and error paths.  We duplicate this because we
+; don't want to waste writing the VINF_SUCCESS return value to the stack in the
+; regular code path.
+;
+; @param 1    Zero if regular return, non-zero if error return.  Controls label emission.
+;
+; @note Important that this does not modify cbFrame or rsp.
+%macro RESTORE_STATE_VMX 1
+        ; Restore base and limit of the IDTR & GDTR.
+ %ifndef VMX_SKIP_IDTR
+        lidt    [rsp + cbFrame + frm_saved_idtr]
+ %endif
+ %ifndef VMX_SKIP_GDTR
+        lgdt    [rsp + cbFrame + frm_saved_gdtr]
+ %endif
+
+        ; Save the guest state and restore the non-volatile registers.  We use rax=pGstCtx here.
+        mov     [rsp + cbFrame + frm_guest_rax], rax
+        mov     rax, [rsp + cbFrame + frm_pGstCtx]
+
+        mov     qword [rax + CPUMCTX.ebp], rbp
+        lea     rbp, [rsp + cbFrame]            ; re-establish the frame pointer as early as possible.
+        mov     qword [rax + CPUMCTX.ecx], rcx
+        mov     rcx, SPECTRE_FILLER
+        mov     qword [rax + CPUMCTX.edx], rdx
+        mov     rdx, [rbp + frm_guest_rax]
+        mov     qword [rax + CPUMCTX.eax], rdx
+        mov     rdx, rcx
+        mov     qword [rax + CPUMCTX.r8], r8
+        mov     r8, rcx
+        mov     qword [rax + CPUMCTX.r9], r9
+        mov     r9, rcx
+        mov     qword [rax + CPUMCTX.r10], r10
+        mov     r10, rcx
+        mov     qword [rax + CPUMCTX.r11], r11
+        mov     r11, rcx
+        mov     qword [rax + CPUMCTX.esi], rsi
+ %ifdef ASM_CALL64_MSC
+        mov     rsi, [rbp + frm_saved_rsi]
+ %else
+        mov     rsi, rcx
+ %endif
+        mov     qword [rax + CPUMCTX.edi], rdi
+ %ifdef ASM_CALL64_MSC
+        mov     rdi, [rbp + frm_saved_rdi]
+ %else
+        mov     rdi, rcx
+ %endif
+        mov     qword [rax + CPUMCTX.ebx], rbx
+        mov     rbx, [rbp + frm_saved_rbx]
+        mov     qword [rax + CPUMCTX.r12], r12
+        mov     r12, [rbp + frm_saved_r12]
+        mov     qword [rax + CPUMCTX.r13], r13
+        mov     r13, [rbp + frm_saved_r13]
+        mov     qword [rax + CPUMCTX.r14], r14
+        mov     r14, [rbp + frm_saved_r14]
+        mov     qword [rax + CPUMCTX.r15], r15
+        mov     r15, [rbp + frm_saved_r15]
+
+        mov     rdx, cr2
+        mov     qword [rax + CPUMCTX.cr2], rdx
+        mov     rdx, rcx
+
+ %if %1 = 0 ; Skip this in failure branch (=> guru)
+        ; Fight spectre.
+        INDIRECT_BRANCH_PREDICTION_BARRIER_CTX rax, CPUMCTX_WSF_IBPB_EXIT
+ %endif
+
+ %ifndef VMX_SKIP_TR
+        ; Restore TSS selector; must mark it as not busy before using ltr!
+        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
+  %ifndef VMX_SKIP_GDTR
+        lgdt    [rbp + frm_saved_gdtr]
+  %endif
+        movzx   eax, word [rbp + frm_saved_tr]
+        mov     ecx, eax
+        and     eax, X86_SEL_MASK_OFF_RPL       ; mask away TI and RPL bits leaving only the descriptor offset
+        add     rax, [rbp + frm_saved_gdtr + 2] ; eax <- GDTR.address + descriptor offset
+        and     dword [rax + 4], ~RT_BIT(9)     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
+        ltr     cx
+ %endif
+        movzx   edx, word [rbp + frm_saved_ldtr]
+        test    edx, edx
+        jz      %%skip_ldt_write
+        lldt    dx
+%%skip_ldt_write:
+
+ %if %1 != 0
+.return_after_vmwrite_error:
+ %endif
+        ; Restore segment registers.
+        ;POP_RELEVANT_SEGMENT_REGISTERS rax, ax - currently broken.
+
+        ; Restore the host XCR0 if necessary.
+        mov     ecx, [rbp + frm_fNoRestoreXcr0]
+        test    ecx, ecx
+        jnz     %%xcr0_after_skip
+        mov     eax, [rbp + frm_uHostXcr0]
+        mov     edx, [rbp + frm_uHostXcr0 + 4]
+        xsetbv                                  ; ecx is already zero.
+%%xcr0_after_skip:
+
+%endmacro ; RESTORE_STATE_VMX
+
+        RESTORE_STATE_VMX 0
        mov     eax, VINF_SUCCESS

 .vmstart64_end:
+        lea     rsp, [rbp + frm_fRFlags]
        popf
-        pop     xBP
+        leave
        ret

+;
+; Error returns.
+;
+%ifdef VBOX_STRICT
+.vmwrite_failed:
+        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_FIELD
+        jz      .return_after_vmwrite_error
+        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR
+        jmp     .return_after_vmwrite_error
+%endif
 .vmxstart64_invalid_vmcs_ptr:
-        RESTORE_STATE_VM64
-        mov     eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
+        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
+        jmp     .vmstart64_error_return
+.vmxstart64_start_failed:
+        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_UNABLE_TO_START_VM
+.vmstart64_error_return:
+        RESTORE_STATE_VMX 1
+        mov     eax, [rbp + frm_rcError]
        jmp     .vmstart64_end
-
-.vmxstart64_start_failed:
-        RESTORE_STATE_VM64
-        mov     eax, VERR_VMX_UNABLE_TO_START_VM
-        jmp     .vmstart64_end
+%undef frm_fRFlags
+%undef frm_pGstCtx
+%undef frm_uHostXcr0
+%undef frm_saved_gdtr
+%undef frm_saved_tr
+%undef frm_fNoRestoreXcr0
+%undef frm_saved_idtr
+%undef frm_saved_ldtr
+%undef frm_rcError
+%undef frm_guest_rax
+%undef cbFrame
 ENDPROC hmR0VMXStartVM
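
For orientation, the structural idea running through the new code above is the switch from push/pop bookkeeping to fixed stack-frame slots addressed relative to rbp, which is what lets the shared RESTORE_STATE_VMX macro serve both the success path and the new vmwrite/VM-start error paths. Below is a minimal stand-alone sketch of that pattern; it is not VirtualBox code, and the example_proc label, the frm_example_* name and the offsets are invented purely for illustration.

; Sketch only: fixed frame slots instead of push/pop sequences (illustrative names/offsets).
BITS 64

%define frm_example_value  -010h                ; fixed frame slot for a saved value
%assign cbExampleFrame      020h                ; total size of the local frame

example_proc:
        push    rbp
        mov     rbp, rsp
        sub     rsp, cbExampleFrame             ; reserve the whole frame below rbp

        mov     [rbp + frm_example_value], rcx  ; stash a host value in its slot
        ; ... body that may leave through more than one exit path ...

        mov     rcx, [rbp + frm_example_value]  ; every exit path restores from the same slot
        leave                                   ; mov rsp, rbp / pop rbp
        ret

Because every slot has a fixed offset, the duplicated error paths in the changeset stay cheap: they only differ in which status value is written to the frm_rcError slot before the common restore logic runs.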