Changeset 55738 in vbox for trunk/src/VBox/VMM
- Timestamp: May 7, 2015 6:42:49 PM (10 years ago)
- svn:sync-xref-src-repo-rev: 100151
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
Legend:
- Unmodified: shown without a marker
- Added: prefixed with "+"
- Removed: prefixed with "-"
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm (r55106 → r55738)

@@ -379 +379 @@
         ; XSAVE
         mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
+%endif
 %ifdef RT_ARCH_AMD64
         o64 xsave [pXState]
…
@@ -473 +476 @@
         ; XRSTOR
         mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
+%endif
 %ifdef RT_ARCH_AMD64
         o64 xrstor [pXState]
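The new "and eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS" lines work because XSAVE and XRSTOR take a requested-feature bitmap in EDX:EAX and only touch the state components whose bits are set; clearing the components that HMR0A.asm already shuttles around the VM-entry call keeps the two paths from saving or loading the same registers twice. Below is a minimal C sketch of the same masking idea, assuming the standard _xsave64/_xrstor64 intrinsics; the component-bit values and the VOLATILE_GUEST_COMPONENTS stand-in are illustrative, not the actual CPUM definitions.

    /* Minimal sketch, not VirtualBox code: limit XSAVE/XRSTOR to selected
     * state components via the requested-feature bitmap (RFBM) in EDX:EAX.
     * Requires a compiler with XSAVE intrinsics enabled (e.g. -mxsave). */
    #include <immintrin.h>
    #include <stdint.h>

    #define XSAVE_C_X87   UINT64_C(0x01)    /* bit 0: x87/MMX state     */
    #define XSAVE_C_SSE   UINT64_C(0x02)    /* bit 1: XMM registers     */
    #define XSAVE_C_YMM   UINT64_C(0x04)    /* bit 2: AVX upper halves  */

    /* Illustrative stand-in for CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS. */
    #define VOLATILE_GUEST_COMPONENTS   (XSAVE_C_SSE | XSAVE_C_YMM)

    /* pXState must point to a 64-byte aligned XSAVE area. */
    static void SaveGuestExtState(void *pXState, uint64_t fXStateMask)
    {
        /* Mirrors "and eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS": skip the
         * components the ring-0 wrapper saves and loads itself. */
        uint64_t fRfbm = fXStateMask & ~(uint64_t)VOLATILE_GUEST_COMPONENTS;
        _xsave64(pXState, fRfbm);           /* counterpart of the XSAVE hunk  */
    }

    static void LoadGuestExtState(void *pXState, uint64_t fXStateMask)
    {
        uint64_t fRfbm = fXStateMask & ~(uint64_t)VOLATILE_GUEST_COMPONENTS;
        _xrstor64(pXState, fRfbm);          /* counterpart of the XRSTOR hunk */
    }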
trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r55301 → r55738)

@@ -69 +69 @@
 ; Use define because I'm too lazy to convert the struct.
 %define XMM_OFF_IN_X86FXSTATE 160
 
+
 ;; @def MYPUSHAD
…
@@ -1173 +1174 @@ (in HMR0VMXStartVMWrapXMM)
 ALIGNCODE(8)
 .guest_fpu_state_active:
-        ; Save the host XMM registers.
+        ; Save the non-volatile host XMM registers.
         movdqa  [rsp + 040h + 000h], xmm6
         movdqa  [rsp + 040h + 010h], xmm7
…
         movdqa  [rsp + 040h + 090h], xmm15
 
+        mov     r10, [xBP + 018h]      ; pCtx
+        mov     eax, [r10 + CPUMCTX.fXStateMask]
+        test    eax, eax
+        jz      .guest_fpu_state_manually
+
+        ;
+        ; Using XSAVE to load the guest XMM, YMM and ZMM registers.
+        ;
+        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
+        xor     edx, edx
+        mov     r10, [r10 + CPUMCTX.pXStateR0]
+        xrstor  [r10]
+
+        ; Make the call (same as in the other case ).
+        mov     r11, [xBP + 38h]       ; pfnStartVM
+        mov     r10, [xBP + 30h]       ; pVCpu
+        mov     [xSP + 020h], r10
+        mov     rcx, [xBP + 010h]      ; fResumeVM
+        mov     rdx, [xBP + 018h]      ; pCtx
+        mov     r8,  [xBP + 020h]      ; pVMCSCache
+        mov     r9,  [xBP + 028h]      ; pVM
+        call    r11
+
+        mov     r11d, eax              ; save return value (xsave below uses eax)
+
+        ; Save the guest XMM registers.
+        mov     r10, [xBP + 018h]      ; pCtx
+        mov     eax, [r10 + CPUMCTX.fXStateMask]
+        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
+        xor     edx, edx
+        mov     r10, [r10 + CPUMCTX.pXStateR0]
+        xsave   [r10]
+
+        mov     eax, r11d              ; restore return value.
+
+.restore_non_volatile_host_xmm_regs:
+        ; Load the non-volatile host XMM registers.
+        movdqa  xmm6,  [rsp + 040h + 000h]
+        movdqa  xmm7,  [rsp + 040h + 010h]
+        movdqa  xmm8,  [rsp + 040h + 020h]
+        movdqa  xmm9,  [rsp + 040h + 030h]
+        movdqa  xmm10, [rsp + 040h + 040h]
+        movdqa  xmm11, [rsp + 040h + 050h]
+        movdqa  xmm12, [rsp + 040h + 060h]
+        movdqa  xmm13, [rsp + 040h + 070h]
+        movdqa  xmm14, [rsp + 040h + 080h]
+        movdqa  xmm15, [rsp + 040h + 090h]
+        leave
+        ret
+
+        ;
+        ; No XSAVE, load and save the guest XMM registers manually.
+        ;
+.guest_fpu_state_manually:
         ; Load the full guest XMM register state.
-        mov     r10, [xBP + 018h]      ; pCtx
         mov     r10, [r10 + CPUMCTX.pXStateR0]
         movdqa  xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
…
         movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
         movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
-
-        ; Load the host XMM registers.
+        jmp     .restore_non_volatile_host_xmm_regs
+ENDPROC HMR0VMXStartVMWrapXMM
+
+;;
+; Wrapper around svm.pfnVMRun that preserves host XMM registers and
+; load the guest ones when necessary.
+;
+; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
+;
+; @returns eax
+;
+; @param pVMCBHostPhys  msc:rcx
+; @param pVMCBPhys      msc:rdx
+; @param pCtx           msc:r8
+; @param pVM            msc:r9
+; @param pVCpu          msc:[rbp+30h]
+; @param pfnVMRun       msc:[rbp+38h]
+;
+; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
+;
+; ASSUMING 64-bit and windows for now.
+ALIGNCODE(16)
+BEGINPROC HMR0SVMRunWrapXMM
+        push    xBP
+        mov     xBP, xSP
+        sub     xSP, 0a0h + 040h       ; Don't bother optimizing the frame size.
+
+        ; spill input parameters.
+        mov     [xBP + 010h], rcx      ; pVMCBHostPhys
+        mov     [xBP + 018h], rdx      ; pVMCBPhys
+        mov     [xBP + 020h], r8       ; pCtx
+        mov     [xBP + 028h], r9       ; pVM
+
+        ; Ask CPUM whether we've started using the FPU yet.
+        mov     rcx, [xBP + 30h]       ; pVCpu
+        call    NAME(CPUMIsGuestFPUStateActive)
+        test    al, al
+        jnz     .guest_fpu_state_active
+
+        ; No need to mess with XMM registers just call the start routine and return.
+        mov     r11, [xBP + 38h]       ; pfnVMRun
+        mov     r10, [xBP + 30h]       ; pVCpu
+        mov     [xSP + 020h], r10
+        mov     rcx, [xBP + 010h]      ; pVMCBHostPhys
+        mov     rdx, [xBP + 018h]      ; pVMCBPhys
+        mov     r8,  [xBP + 020h]      ; pCtx
+        mov     r9,  [xBP + 028h]      ; pVM
+        call    r11
+
+        leave
+        ret
+
+ALIGNCODE(8)
+.guest_fpu_state_active:
+        ; Save the non-volatile host XMM registers.
+        movdqa  [rsp + 040h + 000h], xmm6
+        movdqa  [rsp + 040h + 010h], xmm7
+        movdqa  [rsp + 040h + 020h], xmm8
+        movdqa  [rsp + 040h + 030h], xmm9
+        movdqa  [rsp + 040h + 040h], xmm10
+        movdqa  [rsp + 040h + 050h], xmm11
+        movdqa  [rsp + 040h + 060h], xmm12
+        movdqa  [rsp + 040h + 070h], xmm13
+        movdqa  [rsp + 040h + 080h], xmm14
+        movdqa  [rsp + 040h + 090h], xmm15
+
+        mov     r10, [xBP + 020h]      ; pCtx
+        mov     eax, [r10 + CPUMCTX.fXStateMask]
+        test    eax, eax
+        jz      .guest_fpu_state_manually
+
+        ;
+        ; Using XSAVE.
+        ;
+        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
+        xor     edx, edx
+        mov     r10, [r10 + CPUMCTX.pXStateR0]
+        xrstor  [r10]
+
+        ; Make the call (same as in the other case ).
+        mov     r11, [xBP + 38h]       ; pfnVMRun
+        mov     r10, [xBP + 30h]       ; pVCpu
+        mov     [xSP + 020h], r10
+        mov     rcx, [xBP + 010h]      ; pVMCBHostPhys
+        mov     rdx, [xBP + 018h]      ; pVMCBPhys
+        mov     r8,  [xBP + 020h]      ; pCtx
+        mov     r9,  [xBP + 028h]      ; pVM
+        call    r11
+
+        mov     r11d, eax              ; save return value (xsave below uses eax)
+
+        ; Save the guest XMM registers.
+        mov     r10, [xBP + 020h]      ; pCtx
+        mov     eax, [r10 + CPUMCTX.fXStateMask]
+        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
+        xor     edx, edx
+        mov     r10, [r10 + CPUMCTX.pXStateR0]
+        xsave   [r10]
+
+        mov     eax, r11d              ; restore return value.
+
+.restore_non_volatile_host_xmm_regs:
+        ; Load the non-volatile host XMM registers.
         movdqa  xmm6, [rsp + 040h + 000h]
         movdqa  xmm7, [rsp + 040h + 010h]
…
         leave
         ret
-ENDPROC HMR0VMXStartVMWrapXMM
-
-;;
-; Wrapper around svm.pfnVMRun that preserves host XMM registers and
-; load the guest ones when necessary.
-;
-; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
-;
-; @returns eax
-;
-; @param pVMCBHostPhys  msc:rcx
-; @param pVMCBPhys      msc:rdx
-; @param pCtx           msc:r8
-; @param pVM            msc:r9
-; @param pVCpu          msc:[rbp+30h]
-; @param pfnVMRun       msc:[rbp+38h]
-;
-; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
-;
-; ASSUMING 64-bit and windows for now.
-ALIGNCODE(16)
-BEGINPROC HMR0SVMRunWrapXMM
-        push    xBP
-        mov     xBP, xSP
-        sub     xSP, 0a0h + 040h       ; Don't bother optimizing the frame size.
-
-        ; spill input parameters.
-        mov     [xBP + 010h], rcx      ; pVMCBHostPhys
-        mov     [xBP + 018h], rdx      ; pVMCBPhys
-        mov     [xBP + 020h], r8       ; pCtx
-        mov     [xBP + 028h], r9       ; pVM
-
-        ; Ask CPUM whether we've started using the FPU yet.
-        mov     rcx, [xBP + 30h]       ; pVCpu
-        call    NAME(CPUMIsGuestFPUStateActive)
-        test    al, al
-        jnz     .guest_fpu_state_active
-
-        ; No need to mess with XMM registers just call the start routine and return.
-        mov     r11, [xBP + 38h]       ; pfnVMRun
-        mov     r10, [xBP + 30h]       ; pVCpu
-        mov     [xSP + 020h], r10
-        mov     rcx, [xBP + 010h]      ; pVMCBHostPhys
-        mov     rdx, [xBP + 018h]      ; pVMCBPhys
-        mov     r8,  [xBP + 020h]      ; pCtx
-        mov     r9,  [xBP + 028h]      ; pVM
-        call    r11
-
-        leave
-        ret
-
-ALIGNCODE(8)
-.guest_fpu_state_active:
-        ; Save the host XMM registers.
-        movdqa  [rsp + 040h + 000h], xmm6
-        movdqa  [rsp + 040h + 010h], xmm7
-        movdqa  [rsp + 040h + 020h], xmm8
-        movdqa  [rsp + 040h + 030h], xmm9
-        movdqa  [rsp + 040h + 040h], xmm10
-        movdqa  [rsp + 040h + 050h], xmm11
-        movdqa  [rsp + 040h + 060h], xmm12
-        movdqa  [rsp + 040h + 070h], xmm13
-        movdqa  [rsp + 040h + 080h], xmm14
-        movdqa  [rsp + 040h + 090h], xmm15
 
+        ;
+        ; No XSAVE, load and save the guest XMM registers manually.
+        ;
+.guest_fpu_state_manually:
         ; Load the full guest XMM register state.
-        mov     r10, [xBP + 020h]      ; pCtx
         mov     r10, [r10 + CPUMCTX.pXStateR0]
         movdqa  xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
…
         movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
         movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
-
-        ; Load the host XMM registers.
-        movdqa  xmm6, [rsp + 040h + 000h]
-        movdqa  xmm7, [rsp + 040h + 010h]
-        movdqa  xmm8, [rsp + 040h + 020h]
-        movdqa  xmm9, [rsp + 040h + 030h]
-        movdqa  xmm10, [rsp + 040h + 040h]
-        movdqa  xmm11, [rsp + 040h + 050h]
-        movdqa  xmm12, [rsp + 040h + 060h]
-        movdqa  xmm13, [rsp + 040h + 070h]
-        movdqa  xmm14, [rsp + 040h + 080h]
-        movdqa  xmm15, [rsp + 040h + 090h]
-        leave
-        ret
+        jmp     .restore_non_volatile_host_xmm_regs
 ENDPROC HMR0SVMRunWrapXMM
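Taken together, the reworked wrappers follow one pattern: save the host's non-volatile XMM6 through XMM15 (callee-saved in the Microsoft x64 convention), expose the guest's volatile extended state to the VM-entry routine, make the call, capture the guest state again, and restore the host registers; when CPUMCTX.fXStateMask is zero they fall back to moving XMM0 through XMM15 by hand. Below is a rough C sketch of that control flow, using the same intrinsics as above and hypothetical type and helper names rather than the real VirtualBox API.

    /* Rough sketch of the wrapper control flow; names are hypothetical.
     * The real asm saves/restores host xmm6..xmm15 explicitly; in C the
     * compiler handles callee-saved registers around the call itself. */
    #include <immintrin.h>
    #include <stdint.h>

    typedef struct GUESTCTX
    {
        uint64_t fXStateMask;   /* enabled XSAVE components; 0 = no XSAVE      */
        void    *pXState;       /* 64-byte aligned XSAVE area for the guest    */
    } GUESTCTX;

    typedef int (*PFNRUNGUEST)(GUESTCTX *pCtx);  /* stand-in for pfnStartVM/pfnVMRun */

    /* Illustrative SSE | AVX bits, standing in for the CPUM mask. */
    #define VOLATILE_GUEST_COMPONENTS   (UINT64_C(0x02) | UINT64_C(0x04))

    int RunGuestWrapXmm(GUESTCTX *pCtx, PFNRUNGUEST pfnRun, int fGuestFpuActive)
    {
        int rc;
        if (!fGuestFpuActive)
            return pfnRun(pCtx);                /* guest FPU not in use: plain call */

        if (pCtx->fXStateMask)
        {
            uint64_t fRfbm = pCtx->fXStateMask & VOLATILE_GUEST_COMPONENTS;
            _xrstor64(pCtx->pXState, fRfbm);    /* load guest XMM/YMM/ZMM           */
            rc = pfnRun(pCtx);                  /* VMLAUNCH/VMRESUME or VMRUN       */
            _xsave64(pCtx->pXState, fRfbm);     /* capture them again afterwards    */
        }
        else
        {
            /* No XSAVE: the asm instead loads and stores xmm0..xmm15 with
             * movdqa against the legacy FXSAVE image (.guest_fpu_state_manually). */
            rc = pfnRun(pCtx);
        }
        return rc;
    }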