VirtualBox

Changeset 83057 in vbox


Timestamp: Feb 12, 2020 5:07:09 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 136094
Message: VMM/HM: VMX: Drop 32-bit guest switcher and use the 64-bit switcher for all guest code.

Location: trunk/src/VBox/VMM/VMMR0
Files: 3 edited

Legend: unchanged lines are unmarked; lines added in r83057 are prefixed with '+'; lines removed are prefixed with '-'.
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

    r83029 → r83057

    - ;; @def RESTORE_STATE_VM32
    - ; Macro restoring essential host state and updating guest state
    - ; for common host, 32-bit guest for VT-x.
    - %macro RESTORE_STATE_VM32 0
    -     ; Restore base and limit of the IDTR & GDTR.
    -  %ifndef VMX_SKIP_IDTR
    -     lidt    [xSP]
    -     add     xSP, xCB * 2
    -  %endif
    -  %ifndef VMX_SKIP_GDTR
    -     lgdt    [xSP]
    -     add     xSP, xCB * 2
    -  %endif
    -
    -     push    xDI
    -  %ifndef VMX_SKIP_TR
    -     mov     xDI, [xSP + xCB * 3]         ; pCtx (*3 to skip the saved xDI, TR, LDTR).
    -  %else
    -     mov     xDI, [xSP + xCB * 2]         ; pCtx (*2 to skip the saved xDI, LDTR).
    -  %endif
    -
    -     mov     [ss:xDI + CPUMCTX.eax], eax
    -     mov     xAX, SPECTRE_FILLER
    -     mov     [ss:xDI + CPUMCTX.ebx], ebx
    -     mov     xBX, xAX
    -     mov     [ss:xDI + CPUMCTX.ecx], ecx
    -     mov     xCX, xAX
    -     mov     [ss:xDI + CPUMCTX.edx], edx
    -     mov     xDX, xAX
    -     mov     [ss:xDI + CPUMCTX.esi], esi
    -     mov     xSI, xAX
    -     mov     [ss:xDI + CPUMCTX.ebp], ebp
    -     mov     xBP, xAX
    -     mov     xAX, cr2
    -     mov     [ss:xDI + CPUMCTX.cr2], xAX
    -
    -  %ifdef RT_ARCH_AMD64
    -     pop     xAX                                 ; The guest edi we pushed above.
    -     mov     dword [ss:xDI + CPUMCTX.edi], eax
    -  %else
    -     pop     dword [ss:xDI + CPUMCTX.edi]        ; The guest edi we pushed above.
    -  %endif
    -
    -     ; Fight spectre.
    -     INDIRECT_BRANCH_PREDICTION_BARRIER ss:xDI, CPUMCTX_WSF_IBPB_EXIT
    -
    -  %ifndef VMX_SKIP_TR
    -     ; Restore TSS selector; must mark it as not busy before using ltr (!)
    -     ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    -     ; @todo get rid of sgdt
    -     pop     xBX         ; Saved TR
    -     sub     xSP, xCB * 2
    -     sgdt    [xSP]
    -     mov     xAX, xBX
    -     and     eax, X86_SEL_MASK_OFF_RPL               ; Mask away TI and RPL bits leaving only the descriptor offset.
    -     add     xAX, [xSP + 2]                          ; eax <- GDTR.address + descriptor offset.
    -     and     dword [ss:xAX + 4], ~RT_BIT(9)          ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    -     ltr     bx
    -     add     xSP, xCB * 2
    -  %endif
    -
    -     pop     xAX         ; Saved LDTR
    -  %ifdef RT_ARCH_AMD64
    -     cmp     eax, 0
    -     je      %%skip_ldt_write32
    -  %endif
    -     lldt    ax
    -
    - %%skip_ldt_write32:
    -     add     xSP, xCB     ; pCtx
    -
    -     ; Restore segment registers.
    -     MYPOPSEGS xAX, ax
    -
    -     ; Restore the host XCR0 if necessary.
    -     pop     xCX
    -     test    ecx, ecx
    -     jnz     %%xcr0_after_skip
    -     pop     xAX
    -     pop     xDX
    -     xsetbv                              ; ecx is already zero.
    - %%xcr0_after_skip:
    -
    -     ; Restore general purpose registers.
    -     MYPOPAD
    - %endmacro
    -
    -
    - ;;
    - ; Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode).
    - ;
    - ; @returns VBox status code
    - ; @param    fResume    x86:[ebp+8], msc:rcx,gcc:rdi     Whether to use vmlaunch/vmresume.
    - ; @param    pCtx       x86:[ebp+c], msc:rdx,gcc:rsi     Pointer to the guest-CPU context.
    - ; @param    pvUnused   x86:[ebp+10],msc:r8, gcc:rdx     Unused argument.
    - ; @param    pVM        x86:[ebp+14],msc:r9, gcc:rcx     The cross context VM structure.
    - ; @param    pVCpu      x86:[ebp+18],msc:[ebp+30],gcc:r8 The cross context virtual CPU structure of the calling EMT.
    - ;
    - ALIGNCODE(16)
    - BEGINPROC VMXR0StartVM32
    -     push    xBP
    -     mov     xBP, xSP
    -
    -     pushf
    -     cli
    -
    -     ;
    -     ; Save all general purpose host registers.
    -     ;
    -     MYPUSHAD
    -
    -     ;
    -     ; First we have to write some final guest CPU context registers.
    -     ;
    -     mov     eax, VMX_VMCS_HOST_RIP
    - %ifdef RT_ARCH_AMD64
    -     lea     r10, [.vmlaunch_done wrt rip]
    -     vmwrite rax, r10
    - %else
    -     mov     ecx, .vmlaunch_done
    -     vmwrite eax, ecx
    - %endif
    -     ; Note: assumes success!
    -
    -     ;
    -     ; Unify input parameter registers.
    -     ;
    - %ifdef RT_ARCH_AMD64
    -  %ifdef ASM_CALL64_GCC
    -     ; fResume already in rdi
    -     ; pCtx    already in rsi
    -     mov     rbx, rdx        ; pvUnused
    -  %else
    -     mov     rdi, rcx        ; fResume
    -     mov     rsi, rdx        ; pCtx
    -     mov     rbx, r8         ; pvUnused
    -  %endif
    - %else
    -     mov     edi, [ebp + 8]  ; fResume
    -     mov     esi, [ebp + 12] ; pCtx
    -     mov     ebx, [ebp + 16] ; pvUnused
    - %endif
    -
    -     ;
    -     ; Save the host XCR0 and load the guest one if necessary.
    -     ; Note! Trashes rdx and rcx.
    -     ;
    - %ifdef ASM_CALL64_MSC
    -     mov     rax, [xBP + 30h]            ; pVCpu
    - %elifdef ASM_CALL64_GCC
    -     mov     rax, r8                     ; pVCpu
    - %else
    -     mov     eax, [xBP + 18h]            ; pVCpu
    - %endif
    -     test    byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
    -     jz      .xcr0_before_skip
    -
    -     xor     ecx, ecx
    -     xgetbv                              ; Save the host one on the stack.
    -     push    xDX
    -     push    xAX
    -
    -     mov     eax, [xSI + CPUMCTX.aXcr]   ; Load the guest one.
    -     mov     edx, [xSI + CPUMCTX.aXcr + 4]
    -     xor     ecx, ecx                    ; paranoia
    -     xsetbv
    -
    -     push    0                           ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
    -     jmp     .xcr0_before_done
    -
    - .xcr0_before_skip:
    -     push    3fh                         ; indicate that we need not.
    - .xcr0_before_done:
    -
    -     ;
    -     ; Save segment registers.
    -     ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
    -     ;
    -     MYPUSHSEGS xAX, ax
    -
    -     ; Save the pCtx pointer.
    -     push    xSI
    -
    -     ; Save host LDTR.
    -     xor     eax, eax
    -     sldt    ax
    -     push    xAX
    -
    - %ifndef VMX_SKIP_TR
    -     ; The host TR limit is reset to 0x67; save & restore it manually.
    -     str     eax
    -     push    xAX
    - %endif
    -
    - %ifndef VMX_SKIP_GDTR
    -     ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    -     sub     xSP, xCB * 2
    -     sgdt    [xSP]
    - %endif
    - %ifndef VMX_SKIP_IDTR
    -     sub     xSP, xCB * 2
    -     sidt    [xSP]
    - %endif
    -
    -     ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    -     mov     xBX, [xSI + CPUMCTX.cr2]
    -     mov     xDX, cr2
    -     cmp     xBX, xDX
    -     je      .skip_cr2_write32
    -     mov     cr2, xBX
    -
    - .skip_cr2_write32:
    -     mov     eax, VMX_VMCS_HOST_RSP
    -     vmwrite xAX, xSP
    -     ; Note: assumes success!
    -     ; Don't mess with ESP anymore!!!
    -
    -     ; Fight spectre and similar.
    -     INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY
    -
    -     ; Load guest general purpose registers.
    -     mov     eax, [xSI + CPUMCTX.eax]
    -     mov     ebx, [xSI + CPUMCTX.ebx]
    -     mov     ecx, [xSI + CPUMCTX.ecx]
    -     mov     edx, [xSI + CPUMCTX.edx]
    -     mov     ebp, [xSI + CPUMCTX.ebp]
    -
    -     ; Resume or start VM?
    -     cmp     xDI, 0                  ; fResume
    -
    -     ; Load guest edi & esi.
    -     mov     edi, [xSI + CPUMCTX.edi]
    -     mov     esi, [xSI + CPUMCTX.esi]
    -
    -     je      .vmlaunch_launch
    -
    -     vmresume
    -     jc      near .vmxstart_invalid_vmcs_ptr
    -     jz      near .vmxstart_start_failed
    -     jmp     .vmlaunch_done          ; Here if vmresume detected a failure.
    -
    - .vmlaunch_launch:
    -     vmlaunch
    -     jc      near .vmxstart_invalid_vmcs_ptr
    -     jz      near .vmxstart_start_failed
    -     jmp     .vmlaunch_done          ; Here if vmlaunch detected a failure.
    -
    - ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
    - .vmlaunch_done:
    -     RESTORE_STATE_VM32
    -     mov     eax, VINF_SUCCESS
    -
    - .vmstart_end:
    -     popf
    -     pop     xBP
    -     ret
    -
    - .vmxstart_invalid_vmcs_ptr:
    -     RESTORE_STATE_VM32
    -     mov     eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    -     jmp     .vmstart_end
    -
    - .vmxstart_start_failed:
    -     RESTORE_STATE_VM32
    -     mov     eax, VERR_VMX_UNABLE_TO_START_VM
    -     jmp     .vmstart_end
    -
    - ENDPROC VMXR0StartVM32
    -
    -
      %ifdef RT_ARCH_AMD64
      ;; @def RESTORE_STATE_VM64
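
    The removed switcher saves the host XCR0 with xgetbv before installing the guest value, and the pop xCX / xsetbv sequence in RESTORE_STATE_VM32 puts it back on exit. Below is a minimal C sketch of that save/install/restore pattern, assuming a compiler that provides the _xgetbv/_xsetbv intrinsics in immintrin.h (GCC/Clang need -mxsave); note that XSETBV is privileged, so the pattern only actually executes at ring 0, which is where VMMR0 runs. The names with_guest_xcr0, guest_xcr0, and run_guest are hypothetical stand-ins, not VirtualBox identifiers.

        #include <stdint.h>
        #include <immintrin.h>  /* _xgetbv/_xsetbv; GCC/Clang require -mxsave */

        /* Sketch only: save the host XCR0, install the guest value, run guest
         * code, then restore the host value -- the same dance the removed
         * assembly does around vmlaunch/vmresume.  Ring 0 only: XSETBV faults
         * at CPL > 0.  Names here are hypothetical, not VBox names. */
        static void with_guest_xcr0(uint64_t guest_xcr0, void (*run_guest)(void))
        {
            uint64_t const host_xcr0 = _xgetbv(0);  /* xor ecx,ecx; xgetbv; push xDX/xAX */

            if (host_xcr0 != guest_xcr0)
                _xsetbv(0, guest_xcr0);             /* load CPUMCTX.aXcr halves; xsetbv */

            run_guest();                            /* vmlaunch/vmresume happens here */

            if (host_xcr0 != guest_xcr0)
                _xsetbv(0, host_xcr0);              /* pop xAX/xDX; xsetbv on the way out */
        }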
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r83030 → r83057

              if (RT_SUCCESS(rc))
              {
    +             /*
    +              * Initialize the hardware-assisted VMX execution handler for guest and nested-guest VMCS.
    +              * The host is always 64-bit since we no longer support 32-bit hosts.
    +              * Currently we have just a single handler for all guest modes as well, see @bugref{6208#c73}.
    +              */
    +             pVmcsInfo->pfnStartVM = VMXR0StartVM64;
              if (!fIsNstGstVmcs)
              {
    …
          PVMCC pVM = pVCpu->CTX_SUFF(pVM);
          PVMXVMCSINFO pVmcsInfo      = pVmxTransient->pVmcsInfo;
    -     bool const   fGstInLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
    -
    -     /*
    -      * VMRUN function.
    -      * If the guest supports long mode, always use the 64-bit guest handler, see @bugref{6208#c73}.
    -      * The host is always 64-bit since we no longer support 32-bit hosts.
    -      */
    -     if (pVM->hm.s.fAllow64BitGuests)
    -         pVmcsInfo->pfnStartVM = VMXR0StartVM64;
    -     else
    -         pVmcsInfo->pfnStartVM = VMXR0StartVM32;

          /*
    …
               * here rather than while merging the guest VMCS controls.
               */
    -         if (fGstInLongMode)
    +         if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
              {
                  Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
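
    Condensed, the two HMVMXR0.cpp hunks change the handler selection as follows (a sketch of the selection logic only, surrounding function bodies elided; every identifier comes from the diff above):

        /* Before r83057: chosen per VM at run time, with a 32-bit fallback. */
        if (pVM->hm.s.fAllow64BitGuests)
            pVmcsInfo->pfnStartVM = VMXR0StartVM64;
        else
            pVmcsInfo->pfnStartVM = VMXR0StartVM32;

        /* After r83057: a single handler for all guest modes, assigned once
         * while initializing the VMCS; the host is always 64-bit. */
        pVmcsInfo->pfnStartVM = VMXR0StartVM64;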
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h

    r82968 → r83057

      VMMR0DECL(int)          VMXR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat);
      VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPUCC pVCpu);
    - DECLASM(int)            VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVMCC pVM, PVMCPUCC pVCpu);
      DECLASM(int)            VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVMCC pVM, PVMCPUCC pVCpu);
      #endif /* IN_RING0 */
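
    For orientation, a hedged sketch of a call through the pfnStartVM pointer that matches the remaining declaration; the actual dispatch site is not part of this changeset, and the argument expressions shown are illustrative assumptions, not VirtualBox source:

        /* Illustrative only: invoking the 64-bit switcher through the pointer
         * assigned in HMVMXR0.cpp.  Per the old doc comment and the cmp xDI, 0
         * in the removed switcher, a non-zero fResume selects vmresume and
         * zero selects vmlaunch. */
        int rc = pVmcsInfo->pfnStartVM(fResume,              /* RTHCUINT fResume */
                                       &pVCpu->cpum.GstCtx,  /* PCPUMCTX pCtx (assumed location) */
                                       NULL,                 /* void *pvUnused */
                                       pVM,                  /* PVMCC pVM */
                                       pVCpu);               /* PVMCPUCC pVCpu */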