Changeset 87359 in vbox

- Timestamp: Jan 21, 2021 7:56:26 PM (4 years ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r87344 → r87359

 ;*  Header Files                                                                                                                 *
 ;*********************************************************************************************************************************
-%define RT_ASM_WITH_SEH64
+;%define RT_ASM_WITH_SEH64 - trouble with SEH, alignment and (probably) 2nd pass optimizations.
 %include "VBox/asmdefs.mac"
 %include "VBox/err.mac"
…

 ;;
+; hmR0SvmVmRun template
+;
+; @param    1   The suffix of the variation.
+; @param    2   fLoadSaveGuestXcr0 value
+; @param    3   The CPUMCTX_WSF_IBPB_ENTRY + CPUMCTX_WSF_IBPB_EXIT value.
+%macro hmR0SvmVmRunTemplate 3
+
+;;
 ; Prepares for and executes VMRUN (32-bit and 64-bit guests).
 ;
…
 ; @param    HCPhysVmcb  msc:r8, gcc:rdx     Physical address of guest VMCB.
 ;
-ALIGNCODE(64)
-BEGINPROC SVMR0VMRun
+ALIGNCODE(64) ; This + immediate optimizations causes serious trouble for yasm and the SEH frames: prologue -28 bytes, must be <256
+              ; So the SEH64_XXX stuff is currently not operational.
+BEGINPROC RT_CONCAT(hmR0SvmVmRun,%1)
        push    rbp
        SEH64_PUSH_xBP
…
        SEH64_SET_FRAME_xBP 0
        pushf
-       sub     rsp, 30h - 8h
+       sub     rsp, 30h - 8h           ; The frame is 30h bytes, but the rbp-08h entry is the above pushf.
        SEH64_ALLOCATE_STACK 30h        ; And we have CALLEE_PRESERVED_REGISTER_COUNT following it.

-%define frm_fRFlags         -08h
-%define frm_uHostXcr0       -18h    ; 128-bit
-%define frm_fNoRestoreXcr0  -20h    ; Non-zero if we should skip XCR0 restoring.
-%define frm_pGstCtx         -28h    ; Where we stash guest CPU context for use after the vmrun.
-%define frm_HCPhysVmcbHost  -30h    ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
-%assign cbFrame             30h
+%define frm_fRFlags         -08h
+%define frm_uHostXcr0       -18h    ; 128-bit
+;%define frm_fNoRestoreXcr0 -20h    ; Non-zero if we should skip XCR0 restoring.
+%define frm_pGstCtx         -28h    ; Where we stash guest CPU context for use after the vmrun.
+%define frm_HCPhysVmcbHost  -30h    ; Where we stash HCPhysVmcbHost for the vmload after vmrun.
+%assign cbFrame             30h

 ; Manual save and restore:
…
        PUSH_CALLEE_PRESERVED_REGISTERS
        SEH64_END_PROLOGUE
-%if cbFrame != (30h + 8 * CALLEE_PRESERVED_REGISTER_COUNT)
- %error Bad cbFrame value
-%endif
+%if cbFrame != (30h + 8 * CALLEE_PRESERVED_REGISTER_COUNT)
+ %error Bad cbFrame value
+%endif

 ; Shuffle parameter registers so that r8=HCPhysVmcb and rsi=pVCpu. (rdx & rcx will soon be trashed.)
-%ifdef ASM_CALL64_GCC
+%ifdef ASM_CALL64_GCC
        mov     r8, rdx                 ; Put HCPhysVmcb in r8 like on MSC as rdx is trashed below.
-%else
+%else
        mov     rsi, rdx                ; Put pVCpu in rsi like on GCC as rdx is trashed below.
        ;mov     rdi, rcx               ; Put pVM in rdi like on GCC as rcx is trashed below.
-%endif
-
+%endif
+
+%ifdef VBOX_STRICT
+       ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
+       cmp     byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], %2
+       mov     eax, VERR_SVM_VMRUN_PRECOND_0
+       jne     .failure_return
+
+       mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fWorldSwitcher]
+       and     eax, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT
+       cmp     eax, %3
+       mov     eax, VERR_SVM_VMRUN_PRECOND_1
+       jne     .failure_return
+%endif
+
+%if %2 != 0
        ; Save the host XCR0 and load the guest one if necessary.
-       mov     ecx, 3fh                ; indicate that we need not restore XCR0 (in case we jump)
-       test    byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
-       jz      .xcr0_before_skip
-
        xor     ecx, ecx
        xgetbv                          ; save the host XCR0 on the stack
…
        xor     ecx, ecx                ; paranoia; Also, indicates that we must restore XCR0 (moved into ecx, thus 0).
        xsetbv
-
-.xcr0_before_skip:
-       mov     [rbp + frm_fNoRestoreXcr0], rcx
+%endif

        ; Save host fs, gs, sysenter msr etc.
        mov     rax, [rsi + VMCPU.hm + HMCPU.u + HMCPUSVM.HCPhysVmcbHost]
-       mov     qword [rbp + frm_HCPhysVmcbHost], rax
+       mov     qword [rbp + frm_HCPhysVmcbHost], rax   ; save for the vmload after vmrun
        lea     rsi, [rsi + VMCPU.cpum.GstCtx]
        mov     qword [rbp + frm_pGstCtx], rsi
        vmsave

+%if %3 & CPUMCTX_WSF_IBPB_ENTRY
        ; Fight spectre (trashes rax, rdx and rcx).
-       INDIRECT_BRANCH_PREDICTION_BARRIER_CTX rsi, CPUMCTX_WSF_IBPB_ENTRY
+       mov     ecx, MSR_IA32_PRED_CMD
+       mov     eax, MSR_IA32_PRED_CMD_F_IBPB
+       xor     edx, edx
+       wrmsr
+%endif

        ; Setup rax for VMLOAD.
…
        mov     r11, rcx
        mov     qword [rax + CPUMCTX.edi], rdi
-%ifdef ASM_CALL64_MSC
+%ifdef ASM_CALL64_MSC
        mov     rdi, [rbp + frm_saved_rdi]
-%else
+%else
        mov     rdi, rcx
-%endif
+%endif
        mov     qword [rax + CPUMCTX.esi], rsi
-%ifdef ASM_CALL64_MSC
+%ifdef ASM_CALL64_MSC
        mov     rsi, [rbp + frm_saved_rsi]
-%else
+%else
        mov     rsi, rcx
-%endif
+%endif
        mov     qword [rax + CPUMCTX.ebx], rbx
        mov     rbx, [rbp + frm_saved_rbx]
…

        ; Fight spectre. Note! Trashes rax, rdx and rcx!
-       INDIRECT_BRANCH_PREDICTION_BARRIER_CTX rax, CPUMCTX_WSF_IBPB_EXIT
-
-       ; Restore the host xcr0 if necessary.
-       mov     rcx, [rbp + frm_fNoRestoreXcr0]
-       test    ecx, ecx
-       jnz     .xcr0_after_skip
+%if %3 & CPUMCTX_WSF_IBPB_EXIT
+       ; Fight spectre (trashes rax, rdx and rcx).
+       mov     ecx, MSR_IA32_PRED_CMD
+       mov     eax, MSR_IA32_PRED_CMD_F_IBPB
+       xor     edx, edx
+       wrmsr
+%endif
+
+%if %2 != 0
+       ; Restore the host xcr0.
+       xor     ecx, ecx
        mov     rdx, [rbp + frm_uHostXcr0 + 8]
        mov     rax, [rbp + frm_uHostXcr0]
-       xsetbv                          ; ecx is already zero
-.xcr0_after_skip:
-       nop
-       ; POP_CALLEE_PRESERVED_REGISTERS
-;%if cbFrame != 30h
-; %error Bad cbFrame value
-;%endif
-
+       xsetbv
+%endif
+
+       ; Epilogue (assumes we restored volatile registers above when saving the guest GPRs).
+       mov     eax, VINF_SUCCESS
        add     rsp, cbFrame - 8h
-       mov     eax, VINF_SUCCESS
        popf
        leave
        ret
+
+%ifdef VBOX_STRICT
+       ; Precondition checks failed.
+.failure_return:
+       POP_CALLEE_PRESERVED_REGISTERS
+ %if cbFrame != 30h
+  %error Bad cbFrame value
+ %endif
+       add     rsp, cbFrame - 8h
+       popf
+       leave
+       ret
+%endif
+
 %undef frm_uHostXcr0
 %undef frm_fNoRestoreXcr0
…
 %undef frm_HCPhysVmcbHost
 %undef cbFrame
-ENDPROC SVMR0VMRun
-
+ENDPROC RT_CONCAT(hmR0SvmVmRun,%1)
+
+%endmacro ; hmR0SvmVmRunTemplate
+
+;
+; Instantiate the hmR0SvmVmRun various variations.
+;
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_SansIbpbExit, 0, 0
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_SansIbpbExit, 1, 0
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_SansIbpbExit, 0, CPUMCTX_WSF_IBPB_ENTRY
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_SansIbpbExit, 1, CPUMCTX_WSF_IBPB_ENTRY
+hmR0SvmVmRunTemplate _SansXcr0_SansIbpbEntry_WithIbpbExit, 0, CPUMCTX_WSF_IBPB_EXIT
+hmR0SvmVmRunTemplate _WithXcr0_SansIbpbEntry_WithIbpbExit, 1, CPUMCTX_WSF_IBPB_EXIT
+hmR0SvmVmRunTemplate _SansXcr0_WithIbpbEntry_WithIbpbExit, 0, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT
+hmR0SvmVmRunTemplate _WithXcr0_WithIbpbEntry_WithIbpbExit, 1, CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT
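For readers less fluent in assembly, here is a minimal C++ rendition (not part of the changeset; simplified names, and the flag/error values are assumptions for the sketch) of what the VBOX_STRICT block in each generated variant verifies before VMRUN: the cached per-VCPU state must still match the compile-time parameters the variant was instantiated with, otherwise a precondition error is returned instead of entering the guest.

    // Hedged sketch mirroring the two strict checks in hmR0SvmVmRunTemplate.
    // fLoadSaveGuestXcr0 / fWorldSwitcher stand in for the VMCPU fields read via
    // [rsi + VMCPU.*]; kXcr0Param / kWsfParam stand in for the %2 / %3 macro arguments.
    #include <cstdint>

    constexpr uint32_t CPUMCTX_WSF_IBPB_ENTRY = 1;   // assumed bit values for the sketch
    constexpr uint32_t CPUMCTX_WSF_IBPB_EXIT  = 2;
    constexpr int VERR_SVM_VMRUN_PRECOND_0 = -1;     // placeholder error codes
    constexpr int VERR_SVM_VMRUN_PRECOND_1 = -2;
    constexpr int VINF_SUCCESS = 0;

    int checkVmRunPreconditions(uint8_t fLoadSaveGuestXcr0, uint32_t fWorldSwitcher,
                                uint8_t kXcr0Param, uint32_t kWsfParam)
    {
        if (fLoadSaveGuestXcr0 != kXcr0Param)        // cmp byte [...fLoadSaveGuestXcr0], %2
            return VERR_SVM_VMRUN_PRECOND_0;
        uint32_t const fWsf = fWorldSwitcher & (CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_IBPB_EXIT);
        if (fWsf != kWsfParam)                       // and eax, ...; cmp eax, %3
            return VERR_SVM_VMRUN_PRECOND_1;
        return VINF_SUCCESS;                         // preconditions hold, VMRUN may proceed
    }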
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r87332 → r87359

 
 /**
+ * Sets pfnVMRun to the best suited variant.
+ *
+ * This must be called whenever anything changes relative to the SVMR0VMRun
+ * variant selection:
+ *      - pVCpu->hm.s.fLoadSaveGuestXcr0
+ *      - CPUMCTX_WSF_IBPB_ENTRY in pVCpu->cpum.GstCtx.fWorldSwitcher
+ *      - CPUMCTX_WSF_IBPB_EXIT in pVCpu->cpum.GstCtx.fWorldSwitcher
+ *      - CPUMIsGuestFPUStateActive() (windows only)
+ *      - CPUMCTX.fXStateMask (windows only)
+ *
+ * We currently ASSUME that neither CPUMCTX_WSF_IBPB_ENTRY nor
+ * CPUMCTX_WSF_IBPB_EXIT cannot be changed at runtime.
+ */
+static void hmR0SvmUpdateRunFunction(PVMCPUCC pVCpu)
+{
+    static const PFNHMSVMVMRUN s_apfnHmR0SvmVmRunFunctions[] =
+    {
+        hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit,
+        hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit,
+        hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit,
+        hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit,
+        hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit,
+        hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit,
+        hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit,
+        hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit,
+    };
+    uintptr_t const idx = (pVCpu->hm.s.fLoadSaveGuestXcr0 ? 1 : 0)
+                        | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_ENTRY ? 2 : 0)
+                        | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_EXIT  ? 4 : 0);
+    PFNHMSVMVMRUN const pfnVMRun = s_apfnHmR0SvmVmRunFunctions[idx];
+    if (pVCpu->hm.s.svm.pfnVMRun != pfnVMRun)
+        pVCpu->hm.s.svm.pfnVMRun = pfnVMRun;
+}
+
+
+/**
+ * Selector FNHMSVMVMRUN implementation.
+ */
+static DECLCALLBACK(int) hmR0SvmVMRunSelector(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB)
+{
+    hmR0SvmUpdateRunFunction(pVCpu);
+    return pVCpu->hm.s.svm.pfnVMRun(pVM, pVCpu, HCPhysVMCB);
+}
+
+
+/**
  * Does per-VM AMD-V initialization.
  *
…
  * We now use a single handler for both 32-bit and 64-bit guests, see @bugref{6208#c73}.
  */
-    pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
+    pVCpu->hm.s.svm.pfnVMRun = hmR0SvmVMRunSelector;
 
     /*
…
 
     /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
-    pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
+    bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
+    if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
+    {
+        pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
+        hmR0SvmUpdateRunFunction(pVCpu);
+    }
 
     /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */
…
     {
         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-        pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
-        Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0,
-                  pCtx->cr4));
+        bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
+        Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], fLoadSaveGuestXcr0, pCtx->cr4));
+        if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
+        {
+            pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
+            hmR0SvmUpdateRunFunction(pVCpu);
+        }
     }
     else if (rcStrict == VINF_IEM_RAISED_XCPT)
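A small standalone sketch (not VirtualBox code) showing how the three run-time conditions combine into the index used by hmR0SvmUpdateRunFunction above; the resulting order has to match the instantiation list at the end of HMR0A.asm.

    // Hedged sketch: enumerate the eight combinations and print which variant each selects.
    #include <cstdio>

    int main()
    {
        // Table order mirrors s_apfnHmR0SvmVmRunFunctions / the HMR0A.asm instantiations.
        static const char * const s_apszVariants[] =
        {
            "hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit",  /* 0 */
            "hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit",  /* 1 */
            "hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit",  /* 2 */
            "hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit",  /* 3 */
            "hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit",  /* 4 */
            "hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit",  /* 5 */
            "hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit",  /* 6 */
            "hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit",  /* 7 */
        };
        for (unsigned fXcr0 = 0; fXcr0 <= 1; fXcr0++)
            for (unsigned fIbpbEntry = 0; fIbpbEntry <= 1; fIbpbEntry++)
                for (unsigned fIbpbExit = 0; fIbpbExit <= 1; fIbpbExit++)
                {
                    // Bit 0 = XCR0 switching, bit 1 = IBPB on entry, bit 2 = IBPB on exit.
                    unsigned const idx = (fXcr0 ? 1 : 0) | (fIbpbEntry ? 2 : 0) | (fIbpbExit ? 4 : 0);
                    printf("xcr0=%u ibpbEntry=%u ibpbExit=%u -> [%u] %s\n",
                           fXcr0, fIbpbEntry, fIbpbExit, idx, s_apszVariants[idx]);
                }
        return 0;
    }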
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
r87330 → r87359

  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   HCPhyspVMCB Physical address of the VMCB.
+ *
+ * @remarks With spectre mitigations and the usual need for speed (/ micro
+ *          optimizations), we have a bunch of variations of this code depending
+ *          on a few precoditions. In release builds, the code is entirely
+ *          without conditionals. Debug builds have a couple of assertions that
+ *          shouldn't ever be triggered.
+ *
+ * @{
  */
-DECLASM(int) SVMR0VMRun(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+DECLASM(int) hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);
+/** @} */
+

 /**
trunk/src/VBox/VMM/include/HMInternal.h
r87330 → r87359

 
 /** SVM VMRun function, see SVMR0VMRun(). */
-typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB));
+typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB));
 /** Pointer to a SVM VMRun function. */
 typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
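A hedged sketch (simplified types and names, not VirtualBox code) of the indirection this typedef supports: pfnVMRun initially points at a selector, which resolves the best-suited precompiled variant and caches it, so later calls dispatch straight to that variant.

    // Minimal model of the selector pattern used by hmR0SvmVMRunSelector /
    // hmR0SvmUpdateRunFunction; only one condition is modelled here.
    #include <cstdio>

    struct VCpu;                                    // stand-in for VMCPUCC
    typedef int (*PFNVMRUN)(VCpu *pVCpu);           // stand-in for PFNHMSVMVMRUN

    struct VCpu
    {
        bool     fLoadSaveGuestXcr0;
        PFNVMRUN pfnVMRun;
    };

    static int vmRunSansXcr0(VCpu *) { printf("variant: SansXcr0\n"); return 0; }
    static int vmRunWithXcr0(VCpu *) { printf("variant: WithXcr0\n"); return 0; }

    static void updateRunFunction(VCpu *pVCpu)      // like hmR0SvmUpdateRunFunction
    {
        pVCpu->pfnVMRun = pVCpu->fLoadSaveGuestXcr0 ? vmRunWithXcr0 : vmRunSansXcr0;
    }

    static int vmRunSelector(VCpu *pVCpu)           // like hmR0SvmVMRunSelector
    {
        updateRunFunction(pVCpu);
        return pVCpu->pfnVMRun(pVCpu);
    }

    int main()
    {
        VCpu vcpu = { /*fLoadSaveGuestXcr0=*/false, /*pfnVMRun=*/vmRunSelector };
        vcpu.pfnVMRun(&vcpu);   // first call resolves and caches the variant
        vcpu.pfnVMRun(&vcpu);   // subsequent calls dispatch directly
        return 0;
    }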