VirtualBox

Changeset 83066 in vbox for trunk


Timestamp: Feb 13, 2020 4:15:25 AM (5 years ago)
Author: vboxsync
Message: VMM/HM: SVM: Drop 32-bit guest switcher and use the 64-bit switcher for all guest code.
Location: trunk/src/VBox/VMM/VMMR0
Files: 3 edited

Legend:

  (no prefix)  Unmodified
  +            Added
  -            Removed
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

r83057 → r83066

+%ifdef RT_ARCH_AMD64
;;
-; Prepares for and executes VMRUN (32-bit register context guests)
-;
-; @returns  VBox status code
-; @param    HCPhysVmcbHost  msc:rcx,gcc:rdi     Physical address of host VMCB.
-; @param    HCPhysVmcb      msc:rdx,gcc:rsi     Physical address of guest VMCB.
-; @param    pCtx            msc:r8,gcc:rdx      Pointer to the guest CPU-context.
-; @param    pVM             msc:r9,gcc:rcx      The cross context VM structure.
-; @param    pVCpu           msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
-;
-ALIGNCODE(16)
-BEGINPROC SVMR0VMRun32
-%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
- %ifdef ASM_CALL64_GCC
-    push    r8                ; pVCpu
-    push    rcx               ; pVM
-    push    rdx               ; pCtx
-    push    rsi               ; HCPhysVmcb
-    push    rdi               ; HCPhysVmcbHost
- %else
-    mov     rax, [rsp + 28h]
-    push    rax               ; pVCpu
-    push    r9                ; pVM
-    push    r8                ; pCtx
-    push    rdx               ; HCPhysVmcb
-    push    rcx               ; HCPhysVmcbHost
- %endif
-    push    0
-%endif
-    push    xBP
-    mov     xBP, xSP
-    pushf
-
-    ; Save all general purpose host registers.
-    MYPUSHAD
-
-    ; Load pCtx into xSI.
-    mov     xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2]  ; pCtx
-
-    ; Save the host XCR0 and load the guest one if necessary.
-    mov     xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2] ; pVCpu
-    test    byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
-    jz      .xcr0_before_skip
-
-    xor     ecx, ecx
-    xgetbv                                  ; Save the host XCR0 on the stack
-    push    xDX
-    push    xAX
-
-    mov     xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2]  ; pCtx
-    mov     eax, [xSI + CPUMCTX.aXcr]       ; load the guest XCR0
-    mov     edx, [xSI + CPUMCTX.aXcr + 4]
-    xor     ecx, ecx                        ; paranoia
-    xsetbv
-
-    push    0                               ; indicate that we must restore XCR0 (popped into ecx, thus 0)
-    jmp     .xcr0_before_done
-
-.xcr0_before_skip:
-    push    3fh                             ; indicate that we need not restore XCR0
-.xcr0_before_done:
-
-    ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
-    push    xSI
-
-    ; Save host fs, gs, sysenter msr etc.
-    mov     xAX, [xBP + xCB * 2]                    ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
-    push    xAX                                     ; save for the vmload after vmrun
-    vmsave
-
-    ; Fight spectre.
-    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
-
-    ; Setup xAX for VMLOAD.
-    mov     xAX, [xBP + xCB * 2 + RTHCPHYS_CB]      ; HCPhysVmcb (64 bits physical address; x86: take low dword only)
-
-    ; Load guest general purpose registers.
-    ; eax is loaded from the VMCB by VMRUN.
-    mov     ebx, [xSI + CPUMCTX.ebx]
-    mov     ecx, [xSI + CPUMCTX.ecx]
-    mov     edx, [xSI + CPUMCTX.edx]
-    mov     edi, [xSI + CPUMCTX.edi]
-    mov     ebp, [xSI + CPUMCTX.ebp]
-    mov     esi, [xSI + CPUMCTX.esi]
-
-    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
-    clgi
-    sti
-
-    ; Load guest fs, gs, sysenter msr etc.
-    vmload
-
-    ; Run the VM.
-    vmrun
-
-    ; Save guest fs, gs, sysenter msr etc.
-    vmsave
-
-    ; Load host fs, gs, sysenter msr etc.
-    pop     xAX                             ; load HCPhysVmcbHost (pushed above)
-    vmload
-
-    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
-    cli
-    stgi
-
-    ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
-    pop     xAX
-
-    mov     [ss:xAX + CPUMCTX.ebx], ebx
-    mov     xBX, SPECTRE_FILLER
-    mov     [ss:xAX + CPUMCTX.ecx], ecx
-    mov     xCX, xBX
-    mov     [ss:xAX + CPUMCTX.edx], edx
-    mov     xDX, xBX
-    mov     [ss:xAX + CPUMCTX.esi], esi
-    mov     xSI, xBX
-    mov     [ss:xAX + CPUMCTX.edi], edi
-    mov     xDI, xBX
-    mov     [ss:xAX + CPUMCTX.ebp], ebp
-    mov     xBP, xBX
-
-    ; Fight spectre.  Note! Trashes xAX!
-    INDIRECT_BRANCH_PREDICTION_BARRIER ss:xAX, CPUMCTX_WSF_IBPB_EXIT
-
-    ; Restore the host xcr0 if necessary.
-    pop     xCX
-    test    ecx, ecx
-    jnz     .xcr0_after_skip
-    pop     xAX
-    pop     xDX
-    xsetbv                              ; ecx is already zero
-.xcr0_after_skip:
-
-    ; Restore host general purpose registers.
-    MYPOPAD
-
-    mov     eax, VINF_SUCCESS
-
-    popf
-    pop     xBP
-%ifdef RT_ARCH_AMD64
-    add     xSP, 6*xCB
-%endif
-    ret
-ENDPROC SVMR0VMRun32
-
-
-%ifdef RT_ARCH_AMD64
-;;
-; Prepares for and executes VMRUN (64-bit register context)
+; Prepares for and executes VMRUN (32-bit and 64-bit guests).
;
; @returns  VBox status code
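
The removed 32-bit switcher above wraps VMRUN in a small stack-marker trick for XCR0: when the guest uses a different XCR0, the host value is saved with xgetbv and a 0 is pushed so the exit path knows to restore it; otherwise 3fh is pushed and the restore is skipped. The 64-bit switcher keeps an equivalent sequence, so only the duplicated 32-bit copy goes away. A minimal C sketch of that pattern follows; the helper names are hypothetical stand-ins for the XGETBV/XSETBV instructions and are not part of the VirtualBox sources:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical intrinsics standing in for XGETBV/XSETBV with ECX = 0. */
    extern uint64_t my_xgetbv0(void);
    extern void     my_xsetbv0(uint64_t uValue);

    typedef struct XCR0STATE
    {
        bool     fRestore;   /* mirrors the pushed marker: 0 = restore, 3fh = skip */
        uint64_t uHostXcr0;  /* mirrors the pushed xDX:xAX pair */
    } XCR0STATE;

    /* Entry side: save the host XCR0 and install the guest value if needed. */
    static void loadGuestXcr0(XCR0STATE *pState, bool fLoadSaveGuestXcr0, uint64_t uGuestXcr0)
    {
        pState->fRestore = fLoadSaveGuestXcr0;
        if (fLoadSaveGuestXcr0)
        {
            pState->uHostXcr0 = my_xgetbv0();   /* "xgetbv" + push xDX/xAX in the assembly */
            my_xsetbv0(uGuestXcr0);             /* "xsetbv" with the guest CPUMCTX.aXcr value */
        }
    }

    /* Exit side: restore the host XCR0 only when the marker says so. */
    static void restoreHostXcr0(const XCR0STATE *pState)
    {
        if (pState->fRestore)
            my_xsetbv0(pState->uHostXcr0);
    }
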
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

r83029 → r83066

        /*
+         * Initialize the hardware-assisted SVM guest-execution handler.
+         * We now use a single handler for both 32-bit and 64-bit guests, see @bugref{6208#c73}.
+         */
+        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
+
+        /*
         * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
         * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.


/**
- * Selects the appropriate function to run guest code.
- *
- * @param   pVCpu   The cross context virtual CPU structure.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(void) hmR0SvmSelectVMRunHandler(PVMCPUCC pVCpu)
-{
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
-    {
-# if HC_ARCH_BITS != 64 || ARCH_BITS != 64
-#  error "Only 64-bit hosts are supported!"
-# endif
-        /* Guest may enter long mode, always use 64-bit handler. */
-        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
-    }
-    else
-    {
-        /* Guest is 32-bit only, use the 32-bit handler. */
-        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun32;
-    }
-}
-
-
-/**
 * Enters the AMD-V session.
 *


        hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
    }
-
-    hmR0SvmSelectVMRunHandler(pVCpu);

    /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
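
On the C++ side the change reduces to a single assignment: the per-export selection between the two switchers disappears and the 64-bit handler is installed once when the VCPU is set up. A condensed before/after sketch of that effect, assuming the usual VBox/HM headers for PVMCPUCC and the handler symbols (the wrapper function names here are illustrative only):

    /* Before r83066: the handler was (re)selected on every guest-state export. */
    static void selectVMRunHandlerBefore(PVMCPUCC pVCpu)
    {
        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
            pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;      /* guest may enter long mode */
        else
            pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun32;    /* 32-bit-only guest */
    }

    /* After r83066: installed once during VCPU init and never changed again. */
    static void selectVMRunHandlerAfter(PVMCPUCC pVCpu)
    {
        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;          /* one switcher for all guests */
    }
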
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h

r83029 → r83066

/**
- * Prepares for and executes VMRUN (32-bit register context).
- *
- * @returns VBox status code.
- * @param   pVMCBHostPhys   Physical address of host VMCB.
- * @param   pVMCBPhys       Physical address of the VMCB.
- * @param   pCtx            Pointer to the guest CPU context.
- * @param   pVM             The cross context VM structure. (Not used.)
- * @param   pVCpu           The cross context virtual CPU structure. (Not used.)
- */
-DECLASM(int) SVMR0VMRun32(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVMCC pVM, PVMCPUCC pVCpu);
-
-
-/**
 * Prepares for and executes VMRUN (64-bit register context).
 *
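
The header now declares only the 64-bit entry point, and the assembly is reached solely through the pfnVMRun pointer, so no call sites need to change. The excerpt above is truncated before that prototype; under the assumption that this commit leaves its parameter list identical to the removed SVMR0VMRun32, it would read roughly as sketched below:

    /* Assumed to mirror the removed SVMR0VMRun32 prototype; the diff excerpt
     * above cuts off before the actual declaration. */
    DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVMCC pVM, PVMCPUCC pVCpu);
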
