Changeset 87372 in vbox
- Timestamp: Jan 22, 2021 3:01:51 PM (4 years ago)
- Location: trunk/src/VBox/VMM
- Files: 3 edited
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
- Elided unchanged context is marked with …
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
(r87361 → r87372)

- %ifdef VBOX_WITH_KERNEL_USING_XMM
  ;;
- ; Wrapper around svm.pfnVMRun that preserves host XMM registers and
- ; load the guest ones when necessary.
- ;
- ; @cproto DECLASM(int) hmR0SVMRunWrapXMM(PVM pVM, PVMCPU pVCpu, RTHCPHYS HCPhysVmcb, PFNHMSVMVMRUN pfnVMRun);
- ;
- ; @returns  eax
- ;
- ; @param    pVM         msc:rcx
- ; @param    pVCpu       msc:rdx       The cross context virtual CPU structure of the calling EMT.
- ; @param    HCPhysVmcb  msc:r8
- ; @param    pfnVMRun    msc:r9
- ;
- ; @remarks  This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
- ;
- ; @remarks  Drivers shouldn't use AVX registers without saving+loading:
+ ; hmR0SvmVmRun template
+ ;
+ ; @param    1   The suffix of the variation.
+ ; @param    2   fLoadSaveGuestXcr0 value
+ ; @param    3   The CPUMCTX_WSF_IBPB_ENTRY + CPUMCTX_WSF_IBPB_EXIT value.
+ ; @param    4   The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
+ ;               Drivers shouldn't use AVX registers without saving+loading:
  ;                   https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
  ;               However the compiler docs have different idea:
…
  ;               We'll go with the former for now.
  ;
- ; ASSUMING 64-bit and windows for now.
- ALIGNCODE(64)
- BEGINPROC hmR0SVMRunWrapXMM
-         SEH64_END_PROLOGUE
-         push    xBP
-         mov     xBP, xSP
-         sub     xSP, 0b0h + 040h        ; don't bother optimizing the frame size
-
- %ifndef ASM_CALL64_MSC
-  %error "MSC only"
- %endif
-         ; Spill input parameters.
-         mov     [xBP + 010h], rcx       ; pVM
-         mov     [xBP + 018h], rdx       ; pVCpu
-         mov     [xBP + 020h], r8        ; HCPhysVmcb
-         mov     [xBP + 028h], r9        ; pfnVMRun
-
-         ; Ask CPUM whether we've started using the FPU yet.
- ;; @todo implement this in assembly, it's just checking a couple of things. Or have the C code do it.
-         mov     rcx, rdx                ; pVCpu
-         call    NAME(CPUMIsGuestFPUStateActive)
-         test    al, al
-
-         mov     rcx, [xBP + 010h]       ; pVM
-         mov     rdx, [xBP + 018h]       ; pVCpu
-         mov     r8,  [xBP + 020h]       ; HCPhysVmcb
-         mov     r9,  [xBP + 028h]       ; pfnVMRun
-
-         jnz     .guest_fpu_state_active
-
-         ; No need to mess with XMM registers just call the start routine and return.
-         call    r9
-
-         leave
-         ret
-
- ALIGNCODE(8)
- .guest_fpu_state_active:
-         ; Save the non-volatile host XMM registers.
- ;; @todo change to rbp relative addressing as that saves a byte per instruction!
-         movdqa  [rsp + 040h + 000h], xmm6
-         movdqa  [rsp + 040h + 010h], xmm7
-         movdqa  [rsp + 040h + 020h], xmm8
-         movdqa  [rsp + 040h + 030h], xmm9
-         movdqa  [rsp + 040h + 040h], xmm10
-         movdqa  [rsp + 040h + 050h], xmm11
-         movdqa  [rsp + 040h + 060h], xmm12
-         movdqa  [rsp + 040h + 070h], xmm13
-         movdqa  [rsp + 040h + 080h], xmm14
-         movdqa  [rsp + 040h + 090h], xmm15
-         stmxcsr [rsp + 040h + 0a0h]
-
-         mov     r11, rdx                ; r11 = pVCpu (rdx may get trashed)
-         mov     eax, [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
-         test    eax, eax
-         jz      .guest_fpu_state_manually
-
-         ;
-         ; Using XSAVE.
-         ;
-         and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
-         xor     edx, edx
-         mov     r10, [r11 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
-         xrstor  [r10]
-
-         ; Make the call (same as in the other case).
-         mov     rdx, r11                ; restore pVCpu to rdx
-         call    r9
-
-         mov     r10d, eax               ; save return value (xsave below uses eax)
-
-         ; Save the guest XMM registers.
-         mov     rcx, [xBP + 018h]       ; pVCpu
-         mov     eax, [rcx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
-         and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
-         mov     rcx, [rcx + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
-         xor     edx, edx
-         xsave   [rcx]
-
-         mov     eax, r10d               ; restore return value
-
- .restore_non_volatile_host_xmm_regs:
-         ; Load the non-volatile host XMM registers.
- ;; @todo change to rbp relative addressing as that saves a byte per instruction!
-         movdqa  xmm6,  [rsp + 040h + 000h]
-         movdqa  xmm7,  [rsp + 040h + 010h]
-         movdqa  xmm8,  [rsp + 040h + 020h]
-         movdqa  xmm9,  [rsp + 040h + 030h]
-         movdqa  xmm10, [rsp + 040h + 040h]
-         movdqa  xmm11, [rsp + 040h + 050h]
-         movdqa  xmm12, [rsp + 040h + 060h]
-         movdqa  xmm13, [rsp + 040h + 070h]
-         movdqa  xmm14, [rsp + 040h + 080h]
-         movdqa  xmm15, [rsp + 040h + 090h]
-         ldmxcsr [rsp + 040h + 0a0h]
-         leave
-         ret
-
-         ;
-         ; No XSAVE, load and save the guest XMM registers manually.
-         ;
- ALIGNCODE(8)
- .guest_fpu_state_manually:
-         ; Load the full guest XMM register state.
-         mov     rdx, [r11 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
-         movdqa  xmm0,  [rdx + XMM_OFF_IN_X86FXSTATE + 000h]
-         movdqa  xmm1,  [rdx + XMM_OFF_IN_X86FXSTATE + 010h]
-         movdqa  xmm2,  [rdx + XMM_OFF_IN_X86FXSTATE + 020h]
-         movdqa  xmm3,  [rdx + XMM_OFF_IN_X86FXSTATE + 030h]
-         movdqa  xmm4,  [rdx + XMM_OFF_IN_X86FXSTATE + 040h]
-         movdqa  xmm5,  [rdx + XMM_OFF_IN_X86FXSTATE + 050h]
-         movdqa  xmm6,  [rdx + XMM_OFF_IN_X86FXSTATE + 060h]
-         movdqa  xmm7,  [rdx + XMM_OFF_IN_X86FXSTATE + 070h]
-         movdqa  xmm8,  [rdx + XMM_OFF_IN_X86FXSTATE + 080h]
-         movdqa  xmm9,  [rdx + XMM_OFF_IN_X86FXSTATE + 090h]
-         movdqa  xmm10, [rdx + XMM_OFF_IN_X86FXSTATE + 0a0h]
-         movdqa  xmm11, [rdx + XMM_OFF_IN_X86FXSTATE + 0b0h]
-         movdqa  xmm12, [rdx + XMM_OFF_IN_X86FXSTATE + 0c0h]
-         movdqa  xmm13, [rdx + XMM_OFF_IN_X86FXSTATE + 0d0h]
-         movdqa  xmm14, [rdx + XMM_OFF_IN_X86FXSTATE + 0e0h]
-         movdqa  xmm15, [rdx + XMM_OFF_IN_X86FXSTATE + 0f0h]
-         ldmxcsr [rdx + X86FXSTATE.MXCSR]
-
-         ; Make the call (same as in the other case).
-         mov     rdx, r11                ; restore pVCpu to rdx
-         call    r9
-
-         ; Save the guest XMM registers.
-         mov     rdx, [xBP + 018h]       ; pVCpu
-         mov     rdx, [rdx + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
-         stmxcsr [rdx + X86FXSTATE.MXCSR]
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
-         movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
-         jmp     .restore_non_volatile_host_xmm_regs
- ENDPROC   hmR0SVMRunWrapXMM
-
- %endif ; VBOX_WITH_KERNEL_USING_XMM
-
- ;;
- ; hmR0SvmVmRun template
- ;
- ; @param    1   The suffix of the variation.
- ; @param    2   fLoadSaveGuestXcr0 value
- ; @param    3   The CPUMCTX_WSF_IBPB_ENTRY + CPUMCTX_WSF_IBPB_EXIT value.
- ; @param    4   The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
  %macro hmR0SvmVmRunTemplate 4

…
  BEGINPROC RT_CONCAT(hmR0SvmVmRun,%1)
  %ifdef VBOX_WITH_KERNEL_USING_XMM
-  %if %4 == 0 && 0
+  %if %4 = 0
          ;
          ; The non-saving variant will currently check the two SSE preconditions and pick
…
          ; move these decisions into hmR0SvmUpdateVmRunFunction().
          ;
-         test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
+  %ifdef ASM_CALL64_MSC
+         test    byte [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
+  %else
+         test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
+  %endif
          jz      .save_xmm_no_need
+  %ifdef ASM_CALL64_MSC
          cmp     dword [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
+  %else
+         cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
+  %endif
          je      RT_CONCAT3(hmR0SvmVmRun,%1,_SseManual)
          jmp     RT_CONCAT3(hmR0SvmVmRun,%1,_SseXSave)
…
          SEH64_SET_FRAME_xBP 0
          pushf
-         sub     rsp, 30h - 8h           ; The frame is 30h bytes, but the rbp-08h entry is the above pushf.
-         SEH64_ALLOCATE_STACK 30h        ; And we have CALLEE_PRESERVED_REGISTER_COUNT following it.
-
-  %define frm_fRFlags         -08h
-  %define frm_uHostXcr0       -18h       ; 128-bit
-  ;%define frm_fNoRestoreXcr0 -20h       ; Non-zero if we should skip XCR0 restoring.
-  %define frm_pGstCtx         -28h       ; Where we stash guest CPU context for use after the vmrun.
-  %define frm_HCPhysVmcbHost  -30h       ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
-  %assign cbFrame             30h
+  %assign cbFrame             30h
+  %if %4 != 0
+   %assign cbFrame            cbFrame + 16 * 11  ; Reserve space for 10x 128-bit XMM registers and MXCSR (32-bit)
+  %endif
+  %assign cbBaseFrame         cbFrame
+         sub     rsp, cbFrame - 8h       ; We subtract 8 bytes for the above pushf
+         SEH64_ALLOCATE_STACK cbFrame    ; And we have CALLEE_PRESERVED_REGISTER_COUNT following it.
+
+  %define frm_fRFlags         -008h
+  %define frm_uHostXcr0       -018h      ; 128-bit
+  ;%define frm_fNoRestoreXcr0 -020h      ; Non-zero if we should skip XCR0 restoring.
+  %define frm_pGstCtx         -028h      ; Where we stash guest CPU context for use after the vmrun.
+  %define frm_HCPhysVmcbHost  -030h      ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
+  %if %4 != 0
+   %define frm_saved_xmm6     -040h
+   %define frm_saved_xmm7     -050h
+   %define frm_saved_xmm8     -060h
+   %define frm_saved_xmm9     -070h
+   %define frm_saved_xmm10    -080h
+   %define frm_saved_xmm11    -090h
+   %define frm_saved_xmm12    -0a0h
+   %define frm_saved_xmm13    -0b0h
+   %define frm_saved_xmm14    -0c0h
+   %define frm_saved_xmm15    -0d0h
+   %define frm_saved_mxcsr    -0e0h
+  %endif

  ; Manual save and restore:
…
          PUSH_CALLEE_PRESERVED_REGISTERS
          SEH64_END_PROLOGUE
-  %if cbFrame != (30h + 8 * CALLEE_PRESERVED_REGISTER_COUNT)
+  %if cbFrame != (cbBaseFrame + 8 * CALLEE_PRESERVED_REGISTER_COUNT)
   %error Bad cbFrame value
  %endif
…

  %ifdef VBOX_WITH_KERNEL_USING_XMM
-  %if %4 == 0
-
-  %elif %4 == 1
-  %elif %4 == 2
+         mov     eax, VERR_SVM_VMRUN_PRECOND_2
+         test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fUsedFpuGuest], 1
+  %if %4 = 0
+         ;jnz    .failure_return
  %else
-   %error Invalid template parameter 4.
+         jz      .failure_return
+
+         mov     eax, VERR_SVM_VMRUN_PRECOND_3
+         cmp     dword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask], 0
+   %if %4 = 1
+         jne     .failure_return
+   %elif %4 = 2
+         je      .failure_return
+   %else
+    %error Invalid template parameter 4.
+   %endif
  %endif
+ %endif
+ %endif ; VBOX_STRICT
+
+ %if %4 != 0
+         ; Save the non-volatile SSE host register state.
+         movdqa  [rbp + frm_saved_xmm6 ], xmm6
+         movdqa  [rbp + frm_saved_xmm7 ], xmm7
+         movdqa  [rbp + frm_saved_xmm8 ], xmm8
+         movdqa  [rbp + frm_saved_xmm9 ], xmm9
+         movdqa  [rbp + frm_saved_xmm10], xmm10
+         movdqa  [rbp + frm_saved_xmm11], xmm11
+         movdqa  [rbp + frm_saved_xmm12], xmm12
+         movdqa  [rbp + frm_saved_xmm13], xmm13
+         movdqa  [rbp + frm_saved_xmm14], xmm14
+         movdqa  [rbp + frm_saved_xmm15], xmm15
+         stmxcsr [rbp + frm_saved_mxcsr]
+
+         ; Load the guest state related to the above non-volatile and volatile SSE registers. Trashes rcx, eax and edx.
+         mov     rcx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
+  %if %4 = 1 ; manual
+         movdqa  xmm0,  [rcx + XMM_OFF_IN_X86FXSTATE + 000h]
+         movdqa  xmm1,  [rcx + XMM_OFF_IN_X86FXSTATE + 010h]
+         movdqa  xmm2,  [rcx + XMM_OFF_IN_X86FXSTATE + 020h]
+         movdqa  xmm3,  [rcx + XMM_OFF_IN_X86FXSTATE + 030h]
+         movdqa  xmm4,  [rcx + XMM_OFF_IN_X86FXSTATE + 040h]
+         movdqa  xmm5,  [rcx + XMM_OFF_IN_X86FXSTATE + 050h]
+         movdqa  xmm6,  [rcx + XMM_OFF_IN_X86FXSTATE + 060h]
+         movdqa  xmm7,  [rcx + XMM_OFF_IN_X86FXSTATE + 070h]
+         movdqa  xmm8,  [rcx + XMM_OFF_IN_X86FXSTATE + 080h]
+         movdqa  xmm9,  [rcx + XMM_OFF_IN_X86FXSTATE + 090h]
+         movdqa  xmm10, [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h]
+         movdqa  xmm11, [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h]
+         movdqa  xmm12, [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h]
+         movdqa  xmm13, [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h]
+         movdqa  xmm14, [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h]
+         movdqa  xmm15, [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h]
+         ldmxcsr [rcx + X86FXSTATE.MXCSR]
+  %elif %4 = 2 ; use xrstor/xsave
+         mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
+         and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
+         xor     edx, edx
+         xrstor  [rcx]
+  %else
+   %error invalid template parameter 4
+  %endif
  %endif
  %endif
…
          mov     r15, [rbp + frm_saved_r15]

+ %if %4 != 0
+         ; Set r8 = &pVCpu->cpum.GstCtx; for use below when saving and restoring SSE state.
+         mov     r8, rax
+ %endif
+
          ; Fight spectre. Note! Trashes rax, rdx and rcx!
  %if %3 & CPUMCTX_WSF_IBPB_EXIT
…
  %endif

+ %if %4 != 0
+         ; Save the guest SSE state related to non-volatile and volatile SSE registers.
+         mov     rcx, [r8 + CPUMCTX.pXStateR0]
+  %if %4 = 1 ; manual
+         stmxcsr [rcx + X86FXSTATE.MXCSR]
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
+         movdqa  [rcx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
+  %elif %4 = 2 ; use xrstor/xsave
+         mov     eax, [r8 + CPUMCTX.fXStateMask]
+         and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
+         xor     edx, edx
+         xsave   [rcx]
+  %else
+   %error invalid template parameter 4
+  %endif
+
+         ; Restore the host non-volatile SSE register state.
+         ldmxcsr [rbp + frm_saved_mxcsr]
+         movdqa  [rbp + frm_saved_xmm6 ], xmm6
+         movdqa  [rbp + frm_saved_xmm7 ], xmm7
+         movdqa  [rbp + frm_saved_xmm8 ], xmm8
+         movdqa  [rbp + frm_saved_xmm9 ], xmm9
+         movdqa  [rbp + frm_saved_xmm10], xmm10
+         movdqa  [rbp + frm_saved_xmm11], xmm11
+         movdqa  [rbp + frm_saved_xmm12], xmm12
+         movdqa  [rbp + frm_saved_xmm13], xmm13
+         movdqa  [rbp + frm_saved_xmm14], xmm14
+         movdqa  [rbp + frm_saved_xmm15], xmm15
+ %endif ; %4 != 0
+
          ; Epilogue (assumes we restored volatile registers above when saving the guest GPRs).
          mov     eax, VINF_SUCCESS
…
  .failure_return:
          POP_CALLEE_PRESERVED_REGISTERS
-  %if cbFrame != 30h
-   %error Bad cbFrame value
+  %if cbFrame != cbBaseFrame
+   %error Bad frame size value: cbFrame
  %endif
          add     rsp, cbFrame - 8h
…
  hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit,            0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 0
  hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit,            1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 0
- ;%ifdef VBOX_WITH_KERNEL_USING_XMM
- ;hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 0, 0, 1
- ;hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseManual, 1, 0, 1
- ;hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 0, CPUMCTX_WSF_IBPB_ENTRY, 1
- ;hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseManual, 1, CPUMCTX_WSF_IBPB_ENTRY, 1
- ;hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 0, CPUMCTX_WSF_IBPB_EXIT, 1
- ;hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseManual, 1, CPUMCTX_WSF_IBPB_EXIT, 1
- ;hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 1
- ;hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual, 1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 1
- ;
- ;hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  0, 0, 2
- ;hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,  1, 0, 2
- ;hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  0, CPUMCTX_WSF_IBPB_ENTRY, 2
- ;hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,  1, CPUMCTX_WSF_IBPB_ENTRY, 2
- ;hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  0, CPUMCTX_WSF_IBPB_EXIT, 2
- ;hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,  1, CPUMCTX_WSF_IBPB_EXIT, 2
- ;hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 2
- ;hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,  1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 2
- ;%endif
  %ifdef VBOX_WITH_KERNEL_USING_XMM
+ hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseManual,  0, 0, 1
+ hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseManual,  1, 0, 1
+ hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseManual,  0, CPUMCTX_WSF_IBPB_ENTRY, 1
+ hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseManual,  1, CPUMCTX_WSF_IBPB_ENTRY, 1
+ hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseManual,  0, CPUMCTX_WSF_IBPB_EXIT, 1
+ hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseManual,  1, CPUMCTX_WSF_IBPB_EXIT, 1
+ hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseManual,  0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 1
+ hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual,  1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 1
+
+ hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,   0, 0, 2
+ hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit_SseXSave,   1, 0, 2
+ hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,   0, CPUMCTX_WSF_IBPB_ENTRY, 2
+ hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit_SseXSave,   1, CPUMCTX_WSF_IBPB_ENTRY, 2
+ hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,   0, CPUMCTX_WSF_IBPB_EXIT, 2
+ hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit_SseXSave,   1, CPUMCTX_WSF_IBPB_EXIT, 2
+ hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,   0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 2
+ hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave,   1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT, 2
+ %endif
+
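The SSE-mode dispatch that the new `%if %4 = 0` pre-check performs in assembly (and that the comment above suggests eventually moving into hmR0SvmUpdateVmRunFunction()) can be pictured in C roughly as below. This is an illustrative sketch only: the FNSVMVMRUNVARIANT typedef and sketchPickSseVariant() helper are hypothetical, while the entry-point names come from the instantiation list above and the two inputs correspond to CPUMCTX.fUsedFpuGuest and CPUMCTX.fXStateMask.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical signature matching the call shape used in HMSVMR0.cpp below:
       (pVM, pVCpu, HCPhysVmcb) -> VBox status code.  The real code uses PVMCC/PVMCPUCC/RTHCPHYS. */
    typedef int FNSVMVMRUNVARIANT(void *pVM, void *pVCpu, uint64_t HCPhysVmcb);

    /* Three of the entry points generated by RT_CONCAT(hmR0SvmVmRun,%1) above. */
    extern FNSVMVMRUNVARIANT hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit;
    extern FNSVMVMRUNVARIANT hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual;
    extern FNSVMVMRUNVARIANT hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave;

    /* Sketch of the SSE-mode decision made by the '%if %4 = 0' pre-check, shown for one
       Xcr0/IBPB combination; the other combinations would be resolved the same way. */
    static FNSVMVMRUNVARIANT *sketchPickSseVariant(bool fUsedFpuGuest, uint32_t fXStateMask)
    {
        if (!fUsedFpuGuest)     /* Guest FPU/SSE state not loaded: no SSE saving/restoring needed. */
            return &hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit;
        if (fXStateMask == 0)   /* No XSAVE state mask: save/load the XMM registers manually. */
            return &hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_SseManual;
        return &hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit_SseXSave;   /* Use xsave/xrstor. */
    }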
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r87361 → r87372)

      /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
      pVCpu->cpum.GstCtx.fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
-
-     /*
-      * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
-      * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
-      * callee-saved and thus the need for this XMM wrapper.
-      *
-      * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
-      */
-     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
- #ifdef VBOX_WITH_KERNEL_USING_XMM
-     return hmR0SVMRunWrapXMM(pVM, pVCpu, HCPhysVmcb, pVCpu->hm.s.svm.pfnVMRun);
- #else
-     return pVCpu->hm.s.svm.pfnVMRun(pVM, pVCpu, HCPhysVmcb);
- #endif
+     return pVCpu->hm.s.svm.pfnVMRun(pVCpu->CTX_SUFF(pVM), pVCpu, HCPhysVmcb);
  }
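The comment removed above explains why the wrapper existed in the first place: the Windows x64 convention treats XMM6-XMM15 as callee-saved, so SSE state had to be preserved around the call. After this change the selected assembly variant takes care of that itself and is invoked directly. A simplified sketch of the new call shape follows; the struct and typedef are hypothetical stand-ins for the real HM/VMM types.

    #include <stdint.h>

    /* Hypothetical stand-ins for PVMCC, PVMCPUCC and the pfnVMRun member type. */
    typedef int FNSVMVMRUNVARIANT(void *pVM, void *pVCpu, uint64_t HCPhysVmcb);

    typedef struct SVMCPUSKETCH
    {
        FNSVMVMRUNVARIANT *pfnVMRun;   /* Points at one of the hmR0SvmVmRun_* variants above. */
    } SVMCPUSKETCH;

    /* Before: return hmR0SVMRunWrapXMM(pVM, pVCpu, HCPhysVmcb, pSvmCpu->pfnVMRun);
       After:  the variant handles XMM/MXCSR itself, so it is called directly.    */
    static int sketchRunGuest(SVMCPUSKETCH *pSvmCpu, void *pVM, void *pVCpu, uint64_t HCPhysVmcb)
    {
        return pSvmCpu->pfnVMRun(pVM, pVCpu, HCPhysVmcb);
    }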
trunk/src/VBox/VMM/include/HMInternal.h
(r87359 → r87372)

  DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVMCC pVM, PVMCPUCC pVCpu,
                                     PFNHMVMXSTARTVM pfnStartVM);
- DECLASM(int) hmR0SVMRunWrapXMM(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB, PFNHMSVMVMRUN pfnVMRun);
  # endif
  DECLASM(void) hmR0MdsClear(void);
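As a closing illustration of what the _SseXSave template variants in HMR0A.asm above do around vmrun, here is a minimal user-mode sketch of the same xrstor/xsave round trip using compiler intrinsics. The component mask, the function name and the user-mode setting are illustrative only; the real code runs in ring-0 assembly, masks with CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS, and operates on the guest's CPUMCTX.pXStateR0 area.

    #include <stdint.h>
    #include <immintrin.h>   /* _xsave64/_xrstor64; needs an XSAVE-capable CPU and e.g. -mxsave with GCC/Clang. */

    /* pXStateArea must point at a 64-byte aligned XSAVE area previously initialised with xsave. */
    static void sketchXsaveRoundTrip(void *pXStateArea, uint64_t fXStateMask)
    {
        uint64_t const fComponents = fXStateMask & 0x7;   /* x87 + SSE + AVX; illustrative component mask. */

        _xrstor64(pXStateArea, fComponents);   /* Load the guest SSE/AVX state (the xrstor before vmrun). */
        /* ... guest code would run here ... */
        _xsave64(pXStateArea, fComponents);    /* Save it back afterwards (the xsave after vmrun). */
    }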