VirtualBox

Changeset 46267 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
May 26, 2013 11:29:24 AM (12 years ago)
Author:
vboxsync
Message:

VMM: Optimized 64-bit host VT-x world-switch.

Location:
trunk/src/VBox/VMM
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

    r45955 r46267  
    5858   %define HM_64_BIT_USE_NULL_SEL
    5959  %endif
     60 %endif
     61%endif
     62
     63%ifndef VBOX_WITH_OLD_VTX_CODE
     64 %ifdef RT_ARCH_AMD64
     65  %define VBOX_SKIP_RESTORE_SEG
    6066 %endif
    6167%endif
     
    155161%endif
    156162
     163%ifdef VBOX_SKIP_RESTORE_SEG
     164%macro MYPUSHSEGS64 2
     165%endmacro
     166
     167%macro MYPOPSEGS64 2
     168%endmacro
     169%else ; !VBOX_SKIP_RESTORE_SEG
     157170; trashes rax, rdx & rcx
    158171%macro MYPUSHSEGS64 2
     
    218231 %endif
    219232%endmacro
     233%endif ; VBOX_SKIP_RESTORE_SEG
    220234
    221235%macro MYPUSHAD32 0
     
    270284
    271285BEGINCODE
     286
     287
     288;/**
     289; * Restores host-state fields.
     290; *
     291; * @returns VBox status code
     292; * @param   u32RestoreHostFlags  x86: [ebp + 08h]  msc: rcx  gcc: rdi   u32RestoreHost - RestoreHost flags.
     293; * @param   pRestoreHost         x86: [ebp + 0ch]  msc: rdx  gcc: rsi   pRestoreHost - Pointer to the RestoreHost struct.
     294; */
     295ALIGNCODE(16)
     296BEGINPROC VMXRestoreHostState
     297%ifdef RT_ARCH_AMD64
     298 %ifndef ASM_CALL64_GCC
     299    ; On msc R10, R11 are scratch, RDI and RSI are not. So we must save and restore them!
     300    mov         r10, rdi
     301    mov         r11, rsi
     302    ; Switch to common register usage (i.e. gcc's in this function)
     303    mov         rdi, rcx
     304    mov         rsi, rdx
     305 %endif
     306
     307    test        edi, VMX_RESTORE_HOST_GDTR
     308    jz          near .test_idtr
     309    lgdt        [rsi + 18h]                ; pRestoreHost->HostGdtr
     310
     311.test_idtr:
     312    test        edi, VMX_RESTORE_HOST_IDTR
     313    jz          near .test_ds
     314    lidt        [rsi + 22h]                ; pRestoreHost->HostIdtr
     315
     316.test_ds:
     317    test        edi, VMX_RESTORE_HOST_SEL_DS
     318    jz          near .test_es
     319    mov         ax, word [rsi]             ; pRestoreHost->uHostSelDS
     320    mov         ds, ax             
     321
     322.test_es:
     323    test        edi, VMX_RESTORE_HOST_SEL_ES
     324    jz          near .test_fs
     325    mov         ax, word [rsi + 2]         ; pRestoreHost->uHostSelES
     326    mov         es, ax
     327
     328.test_fs:
     329    ; We're only restoring the selector. The base is valid and restored by VT-x. If we get an interrupt in between FS & GS
     330    ; below, we are fine as the base is what is relevant in 64-bit mode. We need to disable interrupts only during
     331    ; writing of the selector as that zaps (trashes) the upper-part of the base until we wrmsr the full 64-bit base.
     332
     333    test        edi, VMX_RESTORE_HOST_SEL_FS
     334    jz          near .test_gs
     335    mov         ax, word [rsi + 4]        ; pRestoreHost->uHostSelFS
     336    cli                                   ; Disable interrupts as mov fs, ax will zap the upper part of the base
     337    mov         fs, ax
     338    mov         eax, dword [rsi + 8]      ; pRestoreHost->uHostFSBase - Lo
     339    mov         edx, dword [rsi + 0Ch]    ; pRestoreHost->uHostFSBase - Hi
     340    mov         ecx, MSR_K8_FS_BASE
     341    wrmsr
     342    sti                                   ; Re-enable interrupts as fsbase is consistent now
     343
     344.test_gs:
     345    test        edi, VMX_RESTORE_HOST_SEL_GS
     346    jz          near .restore_success
     347    mov         ax, word [rsi + 6]        ; pRestoreHost->uHostSelGS
     348    cli                                   ; Disable interrupts as mov gs, ax will zap the upper part of the base
     349    mov         gs, ax
     350    mov         eax, dword [rsi + 10h]    ; pRestoreHost->uHostGSBase - Lo
     351    mov         edx, dword [rsi + 14h]    ; pRestoreHost->uHostGSBase - Hi
     352    mov         ecx, MSR_K8_GS_BASE
     353    wrmsr
     354    sti                                   ; Re-enable interrupts as gsbase is consistent now
     355
     356.restore_success:
     357    mov         eax, VINF_SUCCESS
     358 %ifndef ASM_CALL64_GCC
     359    ; Restore RDI and RSI on MSC.
     360    mov         rdi, r10
     361    mov         rsi, r11
     362 %endif
     363%else  ; RT_ARCH_X86
     364    mov         eax, VERR_NOT_IMPLEMENTED
     365%endif
     366    ret
     367ENDPROC VMXRestoreHostState
    272368
    273369
  • trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac

    r46099 r46267  
    1818;
    1919
     20%ifndef VBOX_WITH_OLD_VTX_CODE
     21 %ifdef RT_ARCH_AMD64
     22  %define VMX_SKIP_GDTR_IDTR
     23 %endif
     24%endif
    2025
    2126;/**
     
    116121
    117122    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
     123%ifdef VMX_SKIP_GDTR_IDTR
    118124    sub     xSP, xS*2
    119125    sgdt    [xSP]
     
    121127    sub     xSP, xS*2
    122128    sidt    [xSP]
     129%endif
    123130
    124131%ifdef VBOX_WITH_DR6_EXPERIMENT
     
    173180
    174181    ; Restore base and limit of the IDTR & GDTR.
     182%ifdef VMX_SKIP_GDTR_IDTR
    175183    lidt    [xSP]
    176184    add     xSP, xS*2
    177185    lgdt    [xSP]
    178186    add     xSP, xS*2
     187%endif
    179188
    180189    push    xDI
     
    263272.vmxstart_invalid_vmcs_ptr:
    264273    ; Restore base and limit of the IDTR & GDTR
     274%ifdef VMX_SKIP_GDTR_IDTR
    265275    lidt    [xSP]
    266276    add     xSP, xS*2
    267277    lgdt    [xSP]
    268278    add     xSP, xS*2
     279%endif
    269280
    270281    ; Restore TSS selector; must mark it as not busy before using ltr (!)
     
    300311.vmxstart_start_failed:
    301312    ; Restore base and limit of the IDTR & GDTR.
     313%ifdef VMX_SKIP_GDTR_IDTR
    302314    lidt    [xSP]
    303315    add     xSP, xS*2
    304316    lgdt    [xSP]
    305317    add     xSP, xS*2
     318%endif
    306319
    307320    ; Restore TSS selector; must mark it as not busy before using ltr (!)
     
    437450
    438451    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
     452%ifdef VMX_SKIP_GDTR_IDTR
    439453    sub     xSP, xS*2
    440454    sgdt    [xSP]
     
    442456    sub     xSP, xS*2
    443457    sidt    [xSP]
     458%endif
    444459
    445460%ifdef VBOX_WITH_DR6_EXPERIMENT
     
    502517
    503518    ; Restore base and limit of the IDTR & GDTR
     519%ifdef VMX_SKIP_GDTR_IDTR
    504520    lidt    [xSP]
    505521    add     xSP, xS*2
    506522    lgdt    [xSP]
    507523    add     xSP, xS*2
     524%endif
    508525
    509526    push    xDI
     
    609626.vmxstart64_invalid_vmcs_ptr:
    610627    ; Restore base and limit of the IDTR & GDTR.
     628%ifdef VMX_SKIP_GDTR_IDTR
    611629    lidt    [xSP]
    612630    add     xSP, xS*2
    613631    lgdt    [xSP]
    614632    add     xSP, xS*2
     633%endif
    615634
    616635    ; Restore TSS selector; must mark it as not busy before using ltr (!)
     
    659678.vmxstart64_start_failed:
    660679    ; Restore base and limit of the IDTR & GDTR.
     680%ifdef VMX_SKIP_GDTR_IDTR
    661681    lidt    [xSP]
    662682    add     xSP, xS*2
    663683    lgdt    [xSP]
    664684    add     xSP, xS*2
     685%endif
    665686
    666687    ; Restore TSS selector; must mark it as not busy before using ltr (!)
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r46192 r46267  
    20912091{
    20922092    int rc = VERR_INTERNAL_ERROR_5;
    2093     RTSEL uSelCS = 0;
    2094     RTSEL uSelSS = 0;
    20952093    RTSEL uSelDS = 0;
    20962094    RTSEL uSelES = 0;
     
    21002098
    21012099    /*
    2102      * Host Selector registers.
     2100     * Host DS, ES, FS and GS segment registers.
    21032101     */
     2102#if HC_ARCH_BITS == 64
     2103    pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
     2104    uSelDS = ASMGetDS();
     2105    uSelES = ASMGetES();
     2106    uSelFS = ASMGetFS();
     2107    uSelGS = ASMGetGS();
     2108#endif
     2109
     2110    /*
     2111     * Host CS and SS segment registers.
     2112     */
     2113    RTSEL uSelCS;
     2114    RTSEL uSelSS;
    21042115#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    21052116    if (HMVMX_IS_64BIT_HOST_MODE())
     
    21192130#endif
    21202131
    2121     /* Note: VT-x is picky about the RPL of the selectors here; we'll restore them manually. */
     2132    /*
     2133     * Host TR segment register.
     2134     */
    21222135    uSelTR = ASMGetTR();
    21232136
     2137#if HC_ARCH_BITS == 64
     2138    /*
     2139     * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to gain VM-entry and restore them
     2140     * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
     2141     */
     2142    if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))
     2143    {
     2144        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_DS;
     2145        pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS;
     2146        uSelDS = 0;
     2147    }
     2148    if (uSelES & (X86_SEL_RPL | X86_SEL_LDT))
     2149    {
     2150        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_ES;
     2151        pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES;
     2152        uSelES = 0;
     2153    }
     2154    if (uSelFS & (X86_SEL_RPL | X86_SEL_LDT))
     2155    {
     2156        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_FS;
     2157        pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS;
     2158        uSelFS = 0;
     2159    }
     2160    if (uSelGS & (X86_SEL_RPL | X86_SEL_LDT))
     2161    {
     2162        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_GS;
     2163        pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
     2164        uSelGS = 0;
     2165    }
     2166#endif
     2167
    21242168    /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers"  */
    2125     /** @todo Verify if we have any platform that actually run with DS or ES with
    2126      *        RPL != 0 in kernel space. */
    21272169    Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
    21282170    Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
     
    21442186    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);      AssertRCReturn(rc, rc);
    21452187    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);      AssertRCReturn(rc, rc);
    2146     /* Avoid the VMWRITEs as we set the following segments to 0 and the VMCS fields are already  0 (since g_HvmR0 is static) */
    2147 #if 0
     2188#if HC_ARCH_BITS == 64
    21482189    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);      AssertRCReturn(rc, rc);
    21492190    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);      AssertRCReturn(rc, rc);
     
    21562197     * Host GDTR and IDTR.
    21572198     */
    2158     /** @todo Despite VT-x -not- restoring the limits on GDTR and IDTR it should
    2159      *        be safe to -not- save and restore GDTR and IDTR in the assembly
    2160      *        code and just do it here and don't care if the limits are zapped on
    2161      *        VM-exit. */
    21622199    RTGDTR Gdtr;
    21632200    RT_ZERO(Gdtr);
     
    21822219        rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);      AssertRCReturn(rc, rc);
    21832220        rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);      AssertRCReturn(rc, rc);
     2221
     2222#if HC_ARCH_BITS == 64
     2223        /*
      2224         * Determine whether we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
      2225         * maximum limit (0xffff) on every VM-exit.
     2226         */
     2227        if (Gdtr.cbGdt != 0xffff)
     2228        {
     2229            pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
     2230            AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
     2231            memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
     2232        }
     2233
     2234        /*
      2235         * The IDT limit is effectively capped at 0xfff (256 gates of 16 bytes each). Therefore, if the host limit is
      2236         * already 0xfff, VT-x bloating it to 0xffff is not a problem as the extra range cannot be reached anyway.
      2237         * See Intel spec. 6.14.1 "64-Bit Mode IDT" and Intel spec. 6.2 "Exception and Interrupt Vectors".
     2238         */
     2239        if (Idtr.cbIdt < 0x0fff)
     2240        {
     2241            pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
     2242            AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
     2243            memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
     2244        }
     2245#endif
    21842246    }
    21852247
     
    21902252    if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
    21912253    {
    2192         AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit.TR=%RTsel Gdtr.cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
     2254        AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
    21932255        return VERR_VMX_INVALID_HOST_STATE;
    21942256    }
     
    22172279    /*
    22182280     * Host FS base and GS base.
     2219      * For 32-bit hosts the base is handled by the assembly code where we push/pop FS and GS which
    2220      * would take care of the bases. In 64-bit, the MSRs come into play.
    22212281     */
    22222282#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     
    22252285        uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
    22262286        uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
    2227         rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
    2228         AssertRCReturn(rc, rc);
    2229         rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
    2230         AssertRCReturn(rc, rc);
     2287        rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);          AssertRCReturn(rc, rc);
     2288        rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);          AssertRCReturn(rc, rc);
     2289
     2290# if HC_ARCH_BITS == 64
     2291        /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
     2292        if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
     2293            pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
     2294        if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
     2295            pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
     2296# endif
    22312297    }
    22322298#endif
     
    57755841    }
    57765842
     5843    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     5844    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     5845    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
    57775846    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
    57785847    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
     
    64256494     *        we are actually going to be preempted, not all the time like we
    64266495     *        currently do. */
     6496
     6497    /* Restore host-state bits that VT-x only restores partially. */
     6498    if (pVCpu->hm.s.vmx.fRestoreHostFlags)
     6499    {
     6500        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
     6501        pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
     6502    }
     6503
    64276504    /*
    64286505     * Sync the current VMCS (writes back internal data back into the VMCS region in memory)
     
    67406817#endif
    67416818
    6742     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
    67436819    TMNotifyStartOfExecution(pVCpu);                            /* Finally, notify TM to resume its clocks as we're about
    67446820                                                                    to start executing. */
     6821    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
    67456822}
    67466823
  • trunk/src/VBox/VMM/include/HMInternal.h

    r46192 r46267  
    682682        PGMMODE                     enmPrevGuestMode;
    683683#else
     684        uint32_t                    fRestoreHostFlags;
     685        VMXRESTOREHOST              RestoreHost;
    684686        /** Set if guest was executing in real mode (extra checks). */
    685687        bool                        fWasInRealMode;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette