VirtualBox

Changeset 20997 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Jun 26, 2009 10:23:04 PM (16 years ago)
Author:
vboxsync
Message:

HWACCM,CPUM: Fix for 64-bit Windows trashing guest XMM registers - VMX part.

Location:
trunk/src/VBox/VMM
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/HWACCMInternal.h

    r20981 r20997  
    441441typedef VMCSCACHE *PVMCSCACHE;
    442442
     443/** VMX StartVM function. */
     444typedef DECLCALLBACK(int) FNHWACCMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
     445/** Pointer to a VMX StartVM function. */
     446typedef R0PTRTYPE(FNHWACCMVMXSTARTVM *) PFNHWACCMVMXSTARTVM;
     447
    443448/**
    444449 * HWACCM VMCPU Instance data.
     
    480485
    481486        /** Ring 0 handlers for VT-x. */
    482         DECLR0CALLBACKMEMBER(int,  pfnStartVM,(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu));
     487        PFNHWACCMVMXSTARTVM         pfnStartVM;
    483488
    484489        /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
     
    719724VMMR0DECL(int) HWACCMR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    720725
     726# ifdef VBOX_WITH_KERNEL_USING_XMM
     727DECLASM(int) hwaccmR0VMXStartVMWrapperXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
     728# endif
    721729
    722730# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
  • trunk/src/VBox/VMM/Makefile.kmk

    r20922 r20997  
    415415        VBOX_WITH_2X_4GB_ADDR_SPACE   VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 \
    416416        VBOX_WITH_HYBRID_32BIT_KERNEL VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
     417VMMR0_DEFS.win.amd64  = VBOX_WITH_KERNEL_USING_XMM
    417418
    418419ifeq ($(VBOX_LDR_FMT),pe)
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r18927 r20997  
    183183    {
    184184#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
    185 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 2.1. */
     185# if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */
     186        /** @todo Move the FFXR handling down into
     187         *        cpumR0SaveHostRestoreguestFPUState to optimize the
     188         *        VBOX_WITH_KERNEL_USING_XMM handling. */
    186189        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
    187190        uint64_t SavedEFER = 0;
     
    300303    {
    301304#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
     305# ifdef VBOX_WITH_KERNEL_USING_XMM
     306        /*
     307         * We've already saved the XMM registers in the assembly wrapper, so
     308         * we have to save them before saving the entire FPU state and put them
     309         * back afterwards.
     310         */
     311        /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
     312         *        I'm not able to test such an optimization tonight.
     313         *        We could just do all this in assembly. */
     314        uint128_t aGuestXmmRegs[16];
     315        memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
     316# endif
     317
     318        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
    302319        uint64_t oldMsrEFERHost = 0;
    303 
    304         /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
    305320        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
    306321        {
     
    314329            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);
    315330
     331# ifdef VBOX_WITH_KERNEL_USING_XMM
     332        memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
     333# endif
     334
    316335#else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
     336# ifdef VBOX_WITH_KERNEL_USING_XMM
     337#  error "Fix all the NM_TRAPS_IN_KERNEL_MODE code path. I'm not going to fix unused code now."
     338# endif
    317339        cpumR0SaveFPU(pCtx);
    318340        if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
  • trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm

    r20540 r20997  
    3434 %error "The jump table doesn't link on leopard."
    3535%endif
     36
     37;*******************************************************************************
     38;*      Defined Constants And Macros                                           *
     39;*******************************************************************************
     40;; The offset of the XMM registers in X86FXSTATE.
     41; Use define because I'm too lazy to convert the struct.
     42%define XMM_OFF_IN_X86FXSTATE   160
    3643
    3744
     
    103110    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)
    104111    fxrstor [xDX + CPUMCPU.Guest.fpu]
     112
     113%ifdef VBOX_WITH_KERNEL_USING_XMM
     114    ; Restore the non-volatile xmm registers. ASSUMING 64-bit windows
     115    lea     r11, [xDX + CPUMCPU.Host.fpu + XMM_OFF_IN_X86FXSTATE]
     116    movdqa  xmm6,  [r11 + 060h]
     117    movdqa  xmm7,  [r11 + 070h]
     118    movdqa  xmm8,  [r11 + 080h]
     119    movdqa  xmm9,  [r11 + 090h]
     120    movdqa  xmm10, [r11 + 0a0h]
     121    movdqa  xmm11, [r11 + 0b0h]
     122    movdqa  xmm12, [r11 + 0c0h]
     123    movdqa  xmm13, [r11 + 0d0h]
     124    movdqa  xmm14, [r11 + 0e0h]
     125    movdqa  xmm15, [r11 + 0f0h]
     126%endif
    105127
    106128.done:
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm

    r18851 r20997  
    6464 %endif
    6565%endif
     66
     67;; The offset of the XMM registers in X86FXSTATE.
     68; Use define because I'm too lazy to convert the struct.
     69%define XMM_OFF_IN_X86FXSTATE   160
    6670
    6771
     
    249253extern NAME(SUPR0Abs64bitKernelDS)
    250254extern NAME(SUPR0AbsKernelCS)
     255%endif
     256%ifdef VBOX_WITH_KERNEL_USING_XMM
     257extern NAME(CPUMIsGuestFPUStateActive)
    251258%endif
    252259
     
    10201027
    10211028
     1029%ifdef VBOX_WITH_KERNEL_USING_XMM
     1030;;
     1031; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
     1032; loads the guest ones when necessary.
     1033;
     1034; @cproto       DECLASM(int) hwaccmR0VMXStartVMWrapperXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
     1035;
     1036; @returns      eax
     1037;
     1038; @param        fResumeVM       msc:rcx
     1039; @param        pCtx            msc:rdx
     1040; @param        pVMCSCache      msc:r8
     1041; @param        pVM             msc:r9
     1042; @param        pVCpu           msc:[rbp+30h]
     1043; @param        pfnStartVM      msc:[rbp+38h]
     1044;
     1045; ASSUMING 64-bit and windows for now.
     1046ALIGNCODE(16)
     1047BEGINPROC hwaccmR0VMXStartVMWrapperXMM
     1048        push    xBP
     1049        mov     xBP, xSP
     1050        sub     xSP, 0a0h + 040h        ; Don't bother optimizing the frame size.
     1051
     1052        ; spill input parameters.
     1053        mov     [xBP + 010h], rcx       ; fResumeVM
     1054        mov     [xBP + 018h], rdx       ; pCtx
     1055        mov     [xBP + 020h], r8        ; pVMCSCache
     1056        mov     [xBP + 028h], r9        ; pVM
     1057
     1058        ; Ask CPUM whether we've started using the FPU yet.
     1059        mov     rcx, [xBP + 30h]        ; pVCpu
     1060        call    NAME(CPUMIsGuestFPUStateActive)
     1061        test    al, al
     1062        jnz     .guest_fpu_state_active
     1063
     1064        ; No need to mess with XMM registers just call the start routine and return.
     1065        mov     r11, [xBP + 38h]        ; pfnStartVM
     1066        mov     r10, [xBP + 30h]        ; pVCpu
     1067        mov     [xSP + 020h], r10
     1068        mov     rcx, [xBP + 010h]       ; fResumeVM
     1069        mov     rdx, [xBP + 018h]       ; pCtx
     1070        mov     r8,  [xBP + 020h]       ; pVMCSCache
     1071        mov     r9,  [xBP + 028h]       ; pVM
     1072        call    r11
     1073
     1074        leave
     1075        ret
     1076
     1077ALIGNCODE(8)
     1078.guest_fpu_state_active:
     1079        ; Save the host XMM registers.
     1080        movdqa  [rsp + 040h + 000h], xmm6
     1081        movdqa  [rsp + 040h + 010h], xmm7
     1082        movdqa  [rsp + 040h + 020h], xmm8
     1083        movdqa  [rsp + 040h + 030h], xmm9
     1084        movdqa  [rsp + 040h + 040h], xmm10
     1085        movdqa  [rsp + 040h + 050h], xmm11
     1086        movdqa  [rsp + 040h + 060h], xmm12
     1087        movdqa  [rsp + 040h + 070h], xmm13
     1088        movdqa  [rsp + 040h + 080h], xmm14
     1089        movdqa  [rsp + 040h + 090h], xmm15
     1090
     1091        ; Load the full guest XMM register state.
     1092        mov     r10, [xBP + 018h]       ; pCtx
     1093        lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
     1094        movdqa  xmm0,  [r10 + 000h]
     1095        movdqa  xmm1,  [r10 + 010h]
     1096        movdqa  xmm2,  [r10 + 020h]
     1097        movdqa  xmm3,  [r10 + 030h]
     1098        movdqa  xmm4,  [r10 + 040h]
     1099        movdqa  xmm5,  [r10 + 050h]
     1100        movdqa  xmm6,  [r10 + 060h]
     1101        movdqa  xmm7,  [r10 + 070h]
     1102        movdqa  xmm8,  [r10 + 080h]
     1103        movdqa  xmm9,  [r10 + 090h]
     1104        movdqa  xmm10, [r10 + 0a0h]
     1105        movdqa  xmm11, [r10 + 0b0h]
     1106        movdqa  xmm12, [r10 + 0c0h]
     1107        movdqa  xmm13, [r10 + 0d0h]
     1108        movdqa  xmm14, [r10 + 0e0h]
     1109        movdqa  xmm15, [r10 + 0f0h]
     1110
     1111        ; Make the call (same as in the other case).
     1112        mov     r11, [xBP + 38h]        ; pfnStartVM
     1113        mov     r10, [xBP + 30h]        ; pVCpu
     1114        mov     [xSP + 020h], r10
     1115        mov     rcx, [xBP + 010h]       ; fResumeVM
     1116        mov     rdx, [xBP + 018h]       ; pCtx
     1117        mov     r8,  [xBP + 020h]       ; pVMCSCache
     1118        mov     r9,  [xBP + 028h]       ; pVM
     1119        call    r11
     1120
     1121        ; Save the guest XMM registers.
     1122        mov     r10, [xBP + 018h]       ; pCtx
     1123        lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
     1124        movdqa  [r10 + 000h], xmm0
     1125        movdqa  [r10 + 010h], xmm1
     1126        movdqa  [r10 + 020h], xmm2
     1127        movdqa  [r10 + 030h], xmm3
     1128        movdqa  [r10 + 040h], xmm4
     1129        movdqa  [r10 + 050h], xmm5
     1130        movdqa  [r10 + 060h], xmm6
     1131        movdqa  [r10 + 070h], xmm7
     1132        movdqa  [r10 + 080h], xmm8
     1133        movdqa  [r10 + 090h], xmm9
     1134        movdqa  [r10 + 0a0h], xmm10
     1135        movdqa  [r10 + 0b0h], xmm11
     1136        movdqa  [r10 + 0c0h], xmm12
     1137        movdqa  [r10 + 0d0h], xmm13
     1138        movdqa  [r10 + 0e0h], xmm14
     1139        movdqa  [r10 + 0f0h], xmm15
     1140
     1141        ; Load the host XMM registers.
     1142        movdqa  xmm6,  [rsp + 040h + 000h]
     1143        movdqa  xmm7,  [rsp + 040h + 010h]
     1144        movdqa  xmm8,  [rsp + 040h + 020h]
     1145        movdqa  xmm9,  [rsp + 040h + 030h]
     1146        movdqa  xmm10, [rsp + 040h + 040h]
     1147        movdqa  xmm11, [rsp + 040h + 050h]
     1148        movdqa  xmm12, [rsp + 040h + 060h]
     1149        movdqa  xmm13, [rsp + 040h + 070h]
     1150        movdqa  xmm14, [rsp + 040h + 080h]
     1151        movdqa  xmm15, [rsp + 040h + 090h]
     1152        leave
     1153        ret
     1154ENDPROC   hwaccmR0VMXStartVMWrapperXMM
     1155%endif
    10221156
    10231157;
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r20981 r20997  
    23342334
    23352335    TMNotifyStartOfExecution(pVCpu);
     2336#ifdef VBOX_WITH_KERNEL_USING_XMM
     2337    rc = hwaccmR0VMXStartVMWrapperXMM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hwaccm.s.vmx.pfnStartVM);
     2338#else
    23362339    rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu);
     2340#endif
    23372341    TMNotifyEndOfExecution(pVCpu);
    23382342    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette