VirtualBox

Changeset 14859 in vbox for trunk/src/VBox


Timestamp:
Dec 1, 2008 2:01:55 PM
Author:
vboxsync
Message:

More updates for 32/64.

Location:
trunk/src/VBox/VMM
Files:
7 edited

Legend:

      Unmodified
    + Added
    - Removed
  • trunk/src/VBox/VMM/CPUMInternal.h

    r14785 r14859

     /** The XMM state was manually restored. (AMD only) */
     #define CPUM_MANUAL_XMM_RESTORE         RT_BIT(6)
    +/** Sync the FPU state on entry (32->64 switcher only). */
    +#define CPUM_SYNC_FPU_STATE             RT_BIT(7)
    +/** Sync the debug state on entry (32->64 switcher only). */
    +#define CPUM_SYNC_DEBUG_STATE           RT_BIT(8)
     /** @} */

    …

     DECLASM(int)      CPUMHandleLazyFPUAsm(PCPUMCPU pCPUM);
    +DECLASM(int)      CPUMSaveGuestRestoreHostFPUStateAsm(PCPUMCPU pCPUM);
     DECLASM(int)      CPUMRestoreHostFPUStateAsm(PCPUMCPU pCPUM);
     DECLASM(void)     CPUMLoadFPUAsm(PCPUMCTX pCtx);
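
Note: the two new flags extend the lazy-sync protocol in fUseFlags. On a 32-bit host the full 64-bit guest state is only reachable from the 32->64 switcher, so ring-0 merely marks the state for syncing and the switcher consumes the flag on entry. A minimal standalone sketch of that protocol follows; the handler names and control flow are illustrative, not VBox code.

    #include <stdint.h>
    #include <stdio.h>

    #define RT_BIT(n)              (1U << (n))
    #define CPUM_SYNC_FPU_STATE    RT_BIT(7)
    #define CPUM_SYNC_DEBUG_STATE  RT_BIT(8)

    /* Illustrative stand-ins for the real 64-bit-mode load routines. */
    static void load_fpu_state(void)   { puts("FPU/XMM state synced"); }
    static void load_debug_state(void) { puts("debug state synced"); }

    /* What a 32->64 switcher entry path could do with the flags: sync exactly
     * once, then clear the request. */
    static void switcher_entry(uint32_t *pfUseFlags)
    {
        if (*pfUseFlags & CPUM_SYNC_FPU_STATE)
        {
            load_fpu_state();
            *pfUseFlags &= ~CPUM_SYNC_FPU_STATE;
        }
        if (*pfUseFlags & CPUM_SYNC_DEBUG_STATE)
        {
            load_debug_state();
            *pfUseFlags &= ~CPUM_SYNC_DEBUG_STATE;
        }
    }

    int main(void)
    {
        uint32_t fUseFlags = CPUM_SYNC_FPU_STATE | CPUM_SYNC_DEBUG_STATE;
        switcher_entry(&fUseFlags);    /* both states synced         */
        switcher_entry(&fUseFlags);    /* second entry: nothing left */
        return 0;
    }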
  • trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm

    r13960 r14859

     ;
     align 16
    -BEGINPROC CPUMRestoreHostFPUStateAsm
    +BEGINPROC CPUMSaveGuestRestoreHostFPUStateAsm
     %ifdef RT_ARCH_AMD64
      %ifdef RT_OS_WINDOWS

    …

         xor     eax, eax
         ret
    +ENDPROC   CPUMSaveGuestRestoreHostFPUStateAsm
    +
    +;;
    +; Sets the host's FPU/XMM state
    +;
    +; @returns  0
    +; @param    pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
    +;
    +align 16
    +BEGINPROC CPUMRestoreHostFPUStateAsm
    +%ifdef RT_ARCH_AMD64
    + %ifdef RT_OS_WINDOWS
    +    mov     xDX, rcx
    + %else
    +    mov     xDX, rdi
    + %endif
    +%else
    +    mov     xDX, dword [esp + 4]
    +%endif
    +
    +    ; Restore FPU if guest has used it.
    +    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    +    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    +    jz short gth_fpu_no_2
    +
    +    mov     xAX, cr0
    +    mov     xCX, xAX                    ; save old CR0
    +    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    +    mov     cr0, xAX
    +
    +    fxrstor [xDX + CPUMCPU.Host.fpu]
    +
    +    mov     cr0, xCX                    ; and restore old CR0 again
    +    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
    +gth_fpu_no_2:
    +    xor     eax, eax
    +    ret
     ENDPROC   CPUMRestoreHostFPUStateAsm
    -

     ;;
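
Note: the new CPUMRestoreHostFPUStateAsm body performs the usual CR0 dance: save CR0, clear CR0.TS and CR0.EM so fxrstor cannot raise #NM or #UD, then put the caller's CR0 back. A compilable C model of that sequence follows; the CR0 bit positions are the architectural ones, everything else is illustrative.

    #include <assert.h>
    #include <stdint.h>

    #define X86_CR0_EM  (1U << 2)   /* emulate coprocessor: FPU insns raise #UD */
    #define X86_CR0_TS  (1U << 3)   /* task switched: FPU insns raise #NM       */

    /* Model of the CR0 dance around fxrstor: save CR0, clear TS and EM so the
     * restore cannot fault, then return the original value.  The fxrstor
     * itself is elided here. */
    static uint32_t cr0_dance(uint32_t cr0)
    {
        uint32_t const saved = cr0;                    /* mov xCX, xAX          */
        cr0 &= ~(X86_CR0_TS | X86_CR0_EM);             /* and xAX, ~(TS|EM)     */
        assert(!(cr0 & (X86_CR0_TS | X86_CR0_EM)));    /* fxrstor is now safe   */
        /* ... fxrstor [xDX + CPUMCPU.Host.fpu] would execute here ... */
        return saved;                                  /* mov cr0, xCX          */
    }

    int main(void)
    {
        uint32_t const cr0 = X86_CR0_TS | 0x80000001U; /* arbitrary test value  */
        assert(cr0_dance(cr0) == cr0);                 /* CR0 comes back intact */
        return 0;
    }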
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r14704 r14859

      * @param   pVCpu       VMCPU handle
      */
    +VMMDECL(int) CPUMSaveGuestRestoreHostFPUState(PVM pVM, PVMCPU pVCpu)
    +{
    +    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    +    return CPUMSaveGuestRestoreHostFPUStateAsm(&pVCpu->cpum.s);
    +}
    +
    +/**
    + * Set host FPU/XMM state
    + *
    + * @returns VBox status code.
    + * @param   pVM         VM handle.
    + * @param   pVCpu       VMCPU handle
    + */
     VMMDECL(int) CPUMRestoreHostFPUState(PVM pVM, PVMCPU pVCpu)
     {
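
Note: the new wrapper folds the guest-save and host-restore into one call so the ring-0 save path (see CPUMR0.cpp below) performs a single transition, and the Assert records that the assembly worker relies on fxsave/fxrstor. A toy model of this assert-then-delegate shape, not VBox code:

    #include <assert.h>
    #include <stdio.h>

    typedef struct { int fGuestSaved; int fHostRestored; } FPUSTATE;

    /* Stand-in for the DECLASM worker the wrapper delegates to. */
    static int SaveGuestRestoreHostAsm(FPUSTATE *pState)
    {
        pState->fGuestSaved = pState->fHostRestored = 1;
        return 0;  /* VINF_SUCCESS */
    }

    /* Mirrors the wrapper's shape: assert the fxsave/fxrstor precondition,
     * then hand off to the assembly routine. */
    static int SaveGuestRestoreHost(int fHasFxsr, FPUSTATE *pState)
    {
        assert(fHasFxsr);  /* cf. Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR) */
        return SaveGuestRestoreHostAsm(pState);
    }

    int main(void)
    {
        FPUSTATE St = { 0, 0 };
        int rc = SaveGuestRestoreHost(1, &St);
        printf("rc=%d saved=%d restored=%d\n", rc, St.fGuestSaved, St.fHostRestored);
        return 0;
    }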
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r13960 r14859

         }

    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    +    if (CPUMIsGuestInLongModeEx(pCtx))
    +    {
    +        /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
    +        pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
    +    }
    +    else
    +#endif
    +    {
     #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
    -    uint64_t oldMsrEFERHost;
    -    uint32_t oldCR0 = ASMGetCR0();
    -
    -    /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
    -    if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
    -    {
    -        /** @todo Do we really need to read this every time?? The host could change this on the fly though.
    -         *  bird: what about starting by skipping the ASMWrMsr below if we didn't
    -         *        change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
    -        oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
    -        if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
    -        {
    -            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
    -            pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
    -        }
    -    }
    -
    -    /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
    -    int rc = CPUMHandleLazyFPU(pVM, pVCpu);
    -    AssertRC(rc);
    -    Assert(CPUMIsGuestFPUStateActive(pVCpu));
    -
    -    /* Restore EFER MSR */
    -    if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    -        ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);
    -
    -        /* CPUMHandleLazyFPU could have changed CR0; restore it. */
    -    ASMSetCR0(oldCR0);
    +        uint64_t oldMsrEFERHost;
    +        uint32_t oldCR0 = ASMGetCR0();
    +
    +        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
    +        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
    +        {
    +            /** @todo Do we really need to read this every time?? The host could change this on the fly though.
    +             *  bird: what about starting by skipping the ASMWrMsr below if we didn't
    +             *        change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
    +            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
    +            if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
    +            {
    +                ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
    +                pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
    +            }
    +        }
    +
    +        /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
    +        int rc = CPUMHandleLazyFPU(pVM, pVCpu);
    +        AssertRC(rc);
    +        Assert(CPUMIsGuestFPUStateActive(pVCpu));
    +
    +        /* Restore EFER MSR */
    +        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    +            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);
    +
    +            /* CPUMHandleLazyFPU could have changed CR0; restore it. */
    +        ASMSetCR0(oldCR0);

     #else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */

    -    /*
    -     * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
    -     * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
    -     */
    -    pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
    -    if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
    -        pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();
    -
    -    CPUMLoadFPUAsm(pCtx);
    -
    -    /*
    -     * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
    -     *
    -     * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstore: the XMM state isn't saved/restored
    -     */
    -    if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
    -    {
    -        /** @todo Do we really need to read this every time?? The host could change this on the fly though. */
    -        uint64_t msrEFERHost = ASMRdMsr(MSR_K6_EFER);
    -
    -        if (msrEFERHost & MSR_K6_EFER_FFXSR)
    -        {
    -            /* fxrstor doesn't restore the XMM state! */
    -            CPUMLoadXMMAsm(pCtx);
    -            pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
    -        }
    -    }
    +        /*
    +         * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
    +         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
    +         */
    +        pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
    +        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
    +            pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();
    +
    +        CPUMLoadFPUAsm(pCtx);
    +
    +        /*
    +         * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
    +         *
    +         * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstore: the XMM state isn't saved/restored
    +         */
    +        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
    +        {
    +            /** @todo Do we really need to read this every time?? The host could change this on the fly though. */
    +            uint64_t msrEFERHost = ASMRdMsr(MSR_K6_EFER);
    +
    +            if (msrEFERHost & MSR_K6_EFER_FFXSR)
    +            {
    +                /* fxrstor doesn't restore the XMM state! */
    +                CPUMLoadXMMAsm(pCtx);
    +                pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
    +            }
    +        }
     #endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    +    }

         pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU;
     
         AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);

    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    +    if (CPUMIsGuestInLongModeEx(pCtx))
    +    {
    +        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
    +        HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
    +        CPUMRestoreHostFPUState(pVCpu);
    +    }
    +    else
    +#endif
    +    {
     #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
    -    uint64_t oldMsrEFERHost;
    -
    -    /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
    -    if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    -    {
    -        oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
    -        ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
    -    }
    -    CPUMRestoreHostFPUState(pVM, pVCpu);
    -
    -    /* Restore EFER MSR */
    -    if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    -        ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);
    +        uint64_t oldMsrEFERHost;
    +
    +        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
    +        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    +        {
    +            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
    +            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
    +        }
    +        CPUMSaveGuestRestoreHostFPUState(pVM, pVCpu);
    +
    +        /* Restore EFER MSR */
    +        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    +            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);

     #else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    -    CPUMSaveFPUAsm(pCtx);
    -    if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    -    {
    -        /* fxsave doesn't save the XMM state! */
    -        CPUMSaveXMMAsm(pCtx);
    -    }
    -
    -    /*
    -     * Restore the original FPU control word and MXCSR.
    -     * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
    -     */
    -    CPUMSetFCW(pVCpu->cpum.s.Host.fpu.FCW);
    -    if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
    -        CPUMSetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
    +        CPUMSaveFPUAsm(pCtx);
    +        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    +        {
    +            /* fxsave doesn't save the XMM state! */
    +            CPUMSaveXMMAsm(pCtx);
    +        }
    +
    +        /*
    +         * Restore the original FPU control word and MXCSR.
    +         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
    +         */
    +        CPUMSetFCW(pVCpu->cpum.s.Host.fpu.FCW);
    +        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
    +            CPUMSetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
     #endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    +    }

         pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_MANUAL_XMM_RESTORE);
     
         /* Save the guest's debug state. The caller is responsible for DR7. */
    -    pCtx->dr[0] = ASMGetDR0();
    -    pCtx->dr[1] = ASMGetDR1();
    -    pCtx->dr[2] = ASMGetDR2();
    -    pCtx->dr[3] = ASMGetDR3();
    -    if (fDR6)
    -        pCtx->dr[6] = ASMGetDR6();
    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    +    if (CPUMIsGuestInLongModeEx(pCtx))
    +    {
    +        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_STATE));
    +        HWACCMR0SaveDebugState(pVM, pVCpu, pCtx, fDR6);
    +    }
    +    else
    +#endif
    +    {
    +        pCtx->dr[0] = ASMGetDR0();
    +        pCtx->dr[1] = ASMGetDR1();
    +        pCtx->dr[2] = ASMGetDR2();
    +        pCtx->dr[3] = ASMGetDR3();
    +        if (fDR6)
    +            pCtx->dr[6] = ASMGetDR6();
    +    }

         /*
     
         /* Activate the guest state DR0-3; DR7 is left to the caller. */
    -    ASMSetDR0(pCtx->dr[0]);
    -    ASMSetDR1(pCtx->dr[1]);
    -    ASMSetDR2(pCtx->dr[2]);
    -    ASMSetDR3(pCtx->dr[3]);
    -    if (fDR6)
    -        ASMSetDR6(pCtx->dr[6]);
    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    +    if (CPUMIsGuestInLongModeEx(pCtx))
    +    {
    +        /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
    +        pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_STATE;
    +    }
    +    else
    +#endif
    +    {
    +        ASMSetDR0(pCtx->dr[0]);
    +        ASMSetDR1(pCtx->dr[1]);
    +        ASMSetDR2(pCtx->dr[2]);
    +        ASMSetDR3(pCtx->dr[3]);
    +        if (fDR6)
    +            ASMSetDR6(pCtx->dr[6]);
    +    }

         pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
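
Note: throughout these paths the code keeps working around EFER.FFXSR: with that bit set, AMD's fast fxsave/fxrstor skips the XMM registers, so ring-0 clears it for the duration of the state switch and records CPUM_MANUAL_XMM_RESTORE so it can undo the change. A standalone model of that round trip; bit 14 is the architectural FFXSR position, the surrounding scaffolding is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_K6_EFER_FFXSR        (UINT64_C(1) << 14) /* architectural bit 14 */
    #define CPUM_MANUAL_XMM_RESTORE  (1U << 6)

    /* Standalone model (not VBox code) of the EFER.FFXSR round trip: clear the
     * bit so fxsave/fxrstor cover the XMM registers too, remember that via
     * CPUM_MANUAL_XMM_RESTORE, and restore the host's EFER afterwards. */
    int main(void)
    {
        uint64_t msrEfer        = MSR_K6_EFER_FFXSR | 0x500; /* made-up host EFER */
        uint64_t oldMsrEFERHost = 0;
        uint32_t fUseFlags      = 0;

        if (msrEfer & MSR_K6_EFER_FFXSR)
        {
            oldMsrEFERHost = msrEfer;
            msrEfer       &= ~MSR_K6_EFER_FFXSR;       /* full fxsave/fxrstor now */
            fUseFlags     |= CPUM_MANUAL_XMM_RESTORE;  /* note the change         */
        }

        /* ... the lazy FPU load/save would run here ... */

        if (fUseFlags & CPUM_MANUAL_XMM_RESTORE)
            msrEfer = oldMsrEFERHost;                  /* undo our EFER change    */
        printf("EFER restored: %s\n", msrEfer == (MSR_K6_EFER_FFXSR | 0x500) ? "yes" : "no");
        return 0;
    }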
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp

    r14845 r14859

         return rc;
     }
    +
    +
    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    +/**
    + * Save guest FPU/XMM state
    + *
    + * @returns VBox status code.
    + * @param   pVM         VM handle.
    + * @param   pVCpu       VMCPU handle.
    + * @param   pCtx        CPU context
    + */
    +VMMR0DECL(int)   HWACCMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    +{
    +    return VINF_SUCCESS;
    +}
    +
    +/**
    + * Save guest debug state (64 bits guest mode & 32 bits host only)
    + *
    + * @returns VBox status code.
    + * @param   pVM         VM handle.
    + * @param   pVCpu       VMCPU handle.
    + * @param   pCtx        CPU context
    + * @param   fDR6        Include DR6 or not
    + */
    +VMMR0DECL(int)   HWACCMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
    +{
    +    return VINF_SUCCESS;
    +}
    +#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) */

     /**
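
Note: both helpers are placeholders for now, returning VINF_SUCCESS without saving anything; they exist so the CPUMR0 save paths above can link when a 32-bit host build includes 64-bit guest support. A compilable sketch of the compile-time gate they sit behind; the stub bodies and the macro defaults are illustrative, not the real build settings.

    #include <stdio.h>

    /* The real build system defines these; the defaults below are illustrative
     * so the sketch compiles standalone. */
    #ifndef HC_ARCH_BITS
    # define HC_ARCH_BITS 32
    #endif
    #define VBOX_WITH_64_BITS_GUESTS

    #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    /* Stand-ins for the new helpers; like the changeset's versions they just
     * report success for now. */
    static int HWACCMR0SaveFPUStateStub(void)   { return 0; /* VINF_SUCCESS */ }
    static int HWACCMR0SaveDebugStateStub(void) { return 0; /* VINF_SUCCESS */ }
    #endif

    int main(void)
    {
    #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
        printf("stubs compiled in: %d %d\n",
               HWACCMR0SaveFPUStateStub(), HWACCMR0SaveDebugStateStub());
    #else
        puts("64-on-32 support not compiled in");
    #endif
        return 0;
    }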
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r14580 r14859

     DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx)
     {
    +    /* @todo This code is not guest SMP safe (hyper context) */
    +    AssertReturn(pVM->cCPUs == 1, VERR_ACCESS_DENIED);
         return VERR_NOT_IMPLEMENTED;
     }
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r14845 r14859

         RTHCPHYS        pPageCpuPhys;

    +    /* @todo This code is not guest SMP safe (hyper context) */
    +    AssertReturn(pVM->cCPUs == 1, VERR_ACCESS_DENIED);
    +
         pCpu = HWACCMR0GetCurrentCpuEx(pVCpu->idCpu);
         pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
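
Note: both switcher-run entry points now refuse to proceed with more than one VCPU, since the hyper context they will use is not guest-SMP safe. A small model of that AssertReturn guard; the status values are placeholders, not the real IPRT constants, and IPRT's macro also logs the failed expression.

    #include <stdio.h>

    #define VERR_ACCESS_DENIED      (-1)    /* placeholder status values */
    #define VERR_NOT_IMPLEMENTED    (-2)

    /* Simplified AssertReturn: bail out of the function with rc on failure. */
    #define AssertReturn(expr, rc)  do { if (!(expr)) return (rc); } while (0)

    /* Model of the guard both switcher-run paths gained: reject the call before
     * touching the hyper context unless the VM has exactly one virtual CPU. */
    static int SwitcherRun64(unsigned cCPUs)
    {
        AssertReturn(cCPUs == 1, VERR_ACCESS_DENIED);
        return VERR_NOT_IMPLEMENTED;    /* the 64-bit run itself is still a stub */
    }

    int main(void)
    {
        printf("one vcpu: %d, two vcpus: %d\n", SwitcherRun64(1), SwitcherRun64(2));
        return 0;
    }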