Changeset 83057 in vbox
- Timestamp: Feb 12, 2020 5:07:09 AM (5 years ago)
- svn:sync-xref-src-repo-rev: 136094
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 3 edited
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r83029 → r83057

@@ -1238,272 +1238,2 @@
-;; @def RESTORE_STATE_VM32
-; Macro restoring essential host state and updating guest state
-; for common host, 32-bit guest for VT-x.
-%macro RESTORE_STATE_VM32 0
-    ; Restore base and limit of the IDTR & GDTR.
- %ifndef VMX_SKIP_IDTR
-    lidt    [xSP]
-    add     xSP, xCB * 2
- %endif
- %ifndef VMX_SKIP_GDTR
-    lgdt    [xSP]
-    add     xSP, xCB * 2
- %endif
-
-    push    xDI
- %ifndef VMX_SKIP_TR
-    mov     xDI, [xSP + xCB * 3]         ; pCtx (*3 to skip the saved xDI, TR, LDTR).
- %else
-    mov     xDI, [xSP + xCB * 2]         ; pCtx (*2 to skip the saved xDI, LDTR).
- %endif
-
-    mov     [ss:xDI + CPUMCTX.eax], eax
-    mov     xAX, SPECTRE_FILLER
-    mov     [ss:xDI + CPUMCTX.ebx], ebx
-    mov     xBX, xAX
-    mov     [ss:xDI + CPUMCTX.ecx], ecx
-    mov     xCX, xAX
-    mov     [ss:xDI + CPUMCTX.edx], edx
-    mov     xDX, xAX
-    mov     [ss:xDI + CPUMCTX.esi], esi
-    mov     xSI, xAX
-    mov     [ss:xDI + CPUMCTX.ebp], ebp
-    mov     xBP, xAX
-    mov     xAX, cr2
-    mov     [ss:xDI + CPUMCTX.cr2], xAX
-
- %ifdef RT_ARCH_AMD64
-    pop     xAX                          ; The guest edi we pushed above.
-    mov     dword [ss:xDI + CPUMCTX.edi], eax
- %else
-    pop     dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
- %endif
-
-    ; Fight spectre.
-    INDIRECT_BRANCH_PREDICTION_BARRIER ss:xDI, CPUMCTX_WSF_IBPB_EXIT
-
- %ifndef VMX_SKIP_TR
-    ; Restore TSS selector; must mark it as not busy before using ltr (!)
-    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
-    ; @todo get rid of sgdt
-    pop     xBX                          ; Saved TR
-    sub     xSP, xCB * 2
-    sgdt    [xSP]
-    mov     xAX, xBX
-    and     eax, X86_SEL_MASK_OFF_RPL    ; Mask away TI and RPL bits leaving only the descriptor offset.
-    add     xAX, [xSP + 2]               ; eax <- GDTR.address + descriptor offset.
-    and     dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
-    ltr     bx
-    add     xSP, xCB * 2
- %endif
-
-    pop     xAX                          ; Saved LDTR
- %ifdef RT_ARCH_AMD64
-    cmp     eax, 0
-    je      %%skip_ldt_write32
- %endif
-    lldt    ax
-
-%%skip_ldt_write32:
-    add     xSP, xCB                     ; pCtx
-
-    ; Restore segment registers.
-    MYPOPSEGS xAX, ax
-
-    ; Restore the host XCR0 if necessary.
-    pop     xCX
-    test    ecx, ecx
-    jnz     %%xcr0_after_skip
-    pop     xAX
-    pop     xDX
-    xsetbv                               ; ecx is already zero.
-%%xcr0_after_skip:
-
-    ; Restore general purpose registers.
-    MYPOPAD
-%endmacro
-
-
-;;
-; Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
-;
-; @returns VBox status code
-; @param    fResume    x86:[ebp+8], msc:rcx,gcc:rdi     Whether to use vmlauch/vmresume.
-; @param    pCtx       x86:[ebp+c], msc:rdx,gcc:rsi     Pointer to the guest-CPU context.
-; @param    pvUnused   x86:[ebp+10],msc:r8, gcc:rdx     Unused argument.
-; @param    pVM        x86:[ebp+14],msc:r9, gcc:rcx     The cross context VM structure.
-; @param    pVCpu      x86:[ebp+18],msc:[ebp+30],gcc:r8 The cross context virtual CPU structure of the calling EMT.
-;
-ALIGNCODE(16)
-BEGINPROC VMXR0StartVM32
-    push    xBP
-    mov     xBP, xSP
-
-    pushf
-    cli
-
-    ;
-    ; Save all general purpose host registers.
-    ;
-    MYPUSHAD
-
-    ;
-    ; First we have to write some final guest CPU context registers.
-    ;
-    mov     eax, VMX_VMCS_HOST_RIP
-%ifdef RT_ARCH_AMD64
-    lea     r10, [.vmlaunch_done wrt rip]
-    vmwrite rax, r10
-%else
-    mov     ecx, .vmlaunch_done
-    vmwrite eax, ecx
-%endif
-    ; Note: assumes success!
-
-    ;
-    ; Unify input parameter registers.
-    ;
-%ifdef RT_ARCH_AMD64
- %ifdef ASM_CALL64_GCC
-    ; fResume already in rdi
-    ; pCtx already in rsi
-    mov     rbx, rdx                     ; pvUnused
- %else
-    mov     rdi, rcx                     ; fResume
-    mov     rsi, rdx                     ; pCtx
-    mov     rbx, r8                      ; pvUnused
- %endif
-%else
-    mov     edi, [ebp + 8]               ; fResume
-    mov     esi, [ebp + 12]              ; pCtx
-    mov     ebx, [ebp + 16]              ; pvUnused
-%endif
-
-    ;
-    ; Save the host XCR0 and load the guest one if necessary.
-    ; Note! Trashes rdx and rcx.
-    ;
-%ifdef ASM_CALL64_MSC
-    mov     rax, [xBP + 30h]             ; pVCpu
-%elifdef ASM_CALL64_GCC
-    mov     rax, r8                      ; pVCpu
-%else
-    mov     eax, [xBP + 18h]             ; pVCpu
-%endif
-    test    byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
-    jz      .xcr0_before_skip
-
-    xor     ecx, ecx
-    xgetbv                               ; Save the host one on the stack.
-    push    xDX
-    push    xAX
-
-    mov     eax, [xSI + CPUMCTX.aXcr]    ; Load the guest one.
-    mov     edx, [xSI + CPUMCTX.aXcr + 4]
-    xor     ecx, ecx                     ; paranoia
-    xsetbv
-
-    push    0                            ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
-    jmp     .xcr0_before_done
-
-.xcr0_before_skip:
-    push    3fh                          ; indicate that we need not.
-.xcr0_before_done:
-
-    ;
-    ; Save segment registers.
-    ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
-    ;
-    MYPUSHSEGS xAX, ax
-
-    ; Save the pCtx pointer.
-    push    xSI
-
-    ; Save host LDTR.
-    xor     eax, eax
-    sldt    ax
-    push    xAX
-
-%ifndef VMX_SKIP_TR
-    ; The host TR limit is reset to 0x67; save & restore it manually.
-    str     eax
-    push    xAX
-%endif
-
-%ifndef VMX_SKIP_GDTR
-    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
-    sub     xSP, xCB * 2
-    sgdt    [xSP]
-%endif
-%ifndef VMX_SKIP_IDTR
-    sub     xSP, xCB * 2
-    sidt    [xSP]
-%endif
-
-    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
-    mov     xBX, [xSI + CPUMCTX.cr2]
-    mov     xDX, cr2
-    cmp     xBX, xDX
-    je      .skip_cr2_write32
-    mov     cr2, xBX
-
-.skip_cr2_write32:
-    mov     eax, VMX_VMCS_HOST_RSP
-    vmwrite xAX, xSP
-    ; Note: assumes success!
-    ; Don't mess with ESP anymore!!!
-
-    ; Fight spectre and similar.
-    INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY
-
-    ; Load guest general purpose registers.
-    mov     eax, [xSI + CPUMCTX.eax]
-    mov     ebx, [xSI + CPUMCTX.ebx]
-    mov     ecx, [xSI + CPUMCTX.ecx]
-    mov     edx, [xSI + CPUMCTX.edx]
-    mov     ebp, [xSI + CPUMCTX.ebp]
-
-    ; Resume or start VM?
-    cmp     xDI, 0                       ; fResume
-
-    ; Load guest edi & esi.
-    mov     edi, [xSI + CPUMCTX.edi]
-    mov     esi, [xSI + CPUMCTX.esi]
-
-    je      .vmlaunch_launch
-
-    vmresume
-    jc      near .vmxstart_invalid_vmcs_ptr
-    jz      near .vmxstart_start_failed
-    jmp     .vmlaunch_done;              ; Here if vmresume detected a failure.
-
-.vmlaunch_launch:
-    vmlaunch
-    jc      near .vmxstart_invalid_vmcs_ptr
-    jz      near .vmxstart_start_failed
-    jmp     .vmlaunch_done;              ; Here if vmlaunch detected a failure.
-
-ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
-.vmlaunch_done:
-    RESTORE_STATE_VM32
-    mov     eax, VINF_SUCCESS
-
-.vmstart_end:
-    popf
-    pop     xBP
-    ret
-
-.vmxstart_invalid_vmcs_ptr:
-    RESTORE_STATE_VM32
-    mov     eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
-    jmp     .vmstart_end
-
-.vmxstart_start_failed:
-    RESTORE_STATE_VM32
-    mov     eax, VERR_VMX_UNABLE_TO_START_VM
-    jmp     .vmstart_end
-
-ENDPROC VMXR0StartVM32
-
-
 %ifdef RT_ARCH_AMD64
 ;; @def RESTORE_STATE_VM64
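The removed code saves the host XCR0 on the stack, loads the guest value before VMLAUNCH/VMRESUME, and restores the host value in RESTORE_STATE_VM32's "Restore the host XCR0 if necessary" block. Below is a minimal C sketch of that protocol, for orientation only: it assumes IPRT's ASMGetXcr0()/ASMSetXcr0() wrappers for XGETBV/XSETBV, and fLoadSaveGuestXcr0/uGuestXcr0 are illustrative stand-ins for the HMCPU flag and CPUMCTX.aXcr[0] value the assembly reads. The real code stays in assembly because nothing may touch the stack or registers between the VMX_VMCS_HOST_RSP vmwrite and the VM entry.

    /* Sketch only, not part of the changeset. */
    #include <iprt/types.h>
    #include <iprt/asm-amd64-x86.h>         /* assumed: ASMGetXcr0, ASMSetXcr0 */

    static uint64_t g_uHostXcr0;            /* the asm keeps this on the stack instead */
    static bool     g_fRestoreXcr0;         /* the asm pushes 0 (restore) or 3fh (skip) */

    static void vmxSketchLoadGuestXcr0(bool fLoadSaveGuestXcr0, uint64_t uGuestXcr0)
    {
        g_fRestoreXcr0 = false;
        if (fLoadSaveGuestXcr0)             /* the HMCPU.fLoadSaveGuestXcr0 test in the asm */
        {
            g_uHostXcr0 = ASMGetXcr0();     /* xgetbv with ecx = 0: save the host XCR0 */
            ASMSetXcr0(uGuestXcr0);         /* xsetbv: load the guest CPUMCTX.aXcr[0] */
            g_fRestoreXcr0 = true;
        }
    }

    static void vmxSketchRestoreHostXcr0(void) /* mirrors RESTORE_STATE_VM32's XCR0 block */
    {
        if (g_fRestoreXcr0)
            ASMSetXcr0(g_uHostXcr0);
    }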
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r83030 → r83057

@@ -4155,4 +4155,10 @@
     if (RT_SUCCESS(rc))
     {
+        /*
+         * Initialize the hardware-assisted VMX execution handler for guest and nested-guest VMCS.
+         * The host is always 64-bit since we no longer support 32-bit hosts.
+         * Currently we have just a single handler for all guest modes as well, see @bugref{6208#c73}.
+         */
+        pVmcsInfo->pfnStartVM = VMXR0StartVM64;
         if (!fIsNstGstVmcs)
         {
@@ -4852,15 +4858,4 @@
     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
-    bool const fGstInLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
-
-    /*
-     * VMRUN function.
-     * If the guest supports long mode, always use the 64-bit guest handler, see @bugref{6208#c73}.
-     * The host is always 64-bit since we no longer support 32-bit hosts.
-     */
-    if (pVM->hm.s.fAllow64BitGuests)
-        pVmcsInfo->pfnStartVM = VMXR0StartVM64;
-    else
-        pVmcsInfo->pfnStartVM = VMXR0StartVM32;

     /*
@@ -4889,5 +4884,5 @@
      * here rather than while merging the guest VMCS controls.
      */
-    if (fGstInLongMode)
+    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
     {
         Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
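With this change the start-VM handler is fixed to VMXR0StartVM64 once, when the VMCS is set up, instead of being re-selected on every VM-entry. For illustration, a call through that pointer would look roughly like the sketch below; the signature comes from the DECLASM declaration in HMVMXR0.h (next file), while the local fResume and the surrounding context are illustrative and not taken from this changeset.

    /* Sketch only: dispatch through the handler initialized above. */
    RTHCUINT fResume = 0;                   /* 0 = VMLAUNCH, non-zero = VMRESUME (the asm does "cmp xDI, 0"). */
    int rcAsm = pVmcsInfo->pfnStartVM(fResume,
                                      &pVCpu->cpum.GstCtx, /* PCPUMCTX */
                                      NULL,                /* pvUnused */
                                      pVM,
                                      pVCpu);
    /* rcAsm is VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM or
       VERR_VMX_UNABLE_TO_START_VM, as the removed VMXR0StartVM32 sets at its tail. */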
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r82968 → r83057

@@ -46,5 +46,4 @@
 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat);
 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPUCC pVCpu);
-DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVMCC pVM, PVMCPUCC pVCpu);
 DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVMCC pVM, PVMCPUCC pVCpu);
 #endif /* IN_RING0 */