Changeset 20997 in vbox for trunk/src/VBox
- Timestamp: Jun 26, 2009 10:23:04 PM (16 years ago)
- Location: trunk/src/VBox/VMM
- Files: 6 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/HWACCMInternal.h
r20981 r20997 441 441 typedef VMCSCACHE *PVMCSCACHE; 442 442 443 /** VMX StartVM function. */ 444 typedef DECLCALLBACK(int) FNHWACCMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); 445 /** Pointer to an VMX StartVM function. */ 446 typedef R0PTRTYPE(FNHWACCMVMXSTARTVM *) PFNHWACCMVMXSTARTVM; 447 443 448 /** 444 449 * HWACCM VMCPU Instance data. … … 480 485 481 486 /** Ring 0 handlers for VT-x. */ 482 DECLR0CALLBACKMEMBER(int, pfnStartVM,(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu));487 PFNHWACCMVMXSTARTVM pfnStartVM; 483 488 484 489 /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */ … … 719 724 VMMR0DECL(int) HWACCMR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 720 725 726 # ifdef VBOX_WITH_KERNEL_USING_XMM 727 DECLASM(int) hwaccmR0VMXStartVMWrapperXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM); 728 # endif 721 729 722 730 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL -
trunk/src/VBox/VMM/Makefile.kmk
r20922 r20997 415 415 VBOX_WITH_2X_4GB_ADDR_SPACE VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 \ 416 416 VBOX_WITH_HYBRID_32BIT_KERNEL VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 417 VMMR0_DEFS.win.amd64 = VBOX_WITH_KERNEL_USING_XMM 417 418 418 419 ifeq ($(VBOX_LDR_FMT),pe) -
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r18927 r20997 183 183 { 184 184 #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE 185 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 2.1. */ 185 # if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */ 186 /** @todo Move the FFXR handling down into 187 * cpumR0SaveHostRestoreguestFPUState to optimize the 188 * VBOX_WITH_KERNEL_USING_XMM handling. */ 186 189 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */ 187 190 uint64_t SavedEFER = 0; … … 300 303 { 301 304 #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE 305 # ifdef VBOX_WITH_KERNEL_USING_XMM 306 /* 307 * We've already saved the XMM registers in the assembly wrapper, so 308 * we have to save them before saving the entire FPU state and put them 309 * back afterwards. 310 */ 311 /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but 312 * I'm not able to test such an optimization tonight. 313 * We could just all this in assembly. */ 314 uint128_t aGuestXmmRegs[16]; 315 memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs)); 316 # endif 317 318 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */ 302 319 uint64_t oldMsrEFERHost = 0; 303 304 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. 
*/305 320 if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE) 306 321 { … … 314 329 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR); 315 330 331 # ifdef VBOX_WITH_KERNEL_USING_XMM 332 memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs)); 333 # endif 334 316 335 #else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */ 336 # ifdef VBOX_WITH_KERNEL_USING_XMM 337 # error "Fix all the NM_TRAPS_IN_KERNEL_MODE code path. I'm not going to fix unused code now." 338 # endif 317 339 cpumR0SaveFPU(pCtx); 318 340 if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE) -
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm
r20540 r20997 34 34 %error "The jump table doesn't link on leopard." 35 35 %endif 36 37 ;******************************************************************************* 38 ;* Defined Constants And Macros * 39 ;******************************************************************************* 40 ;; The offset of the XMM registers in X86FXSTATE. 41 ; Use define because I'm too lazy to convert the struct. 42 %define XMM_OFF_IN_X86FXSTATE 160 36 43 37 44 … … 103 110 fxsave [xDX + CPUMCPU.Host.fpu] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption) 104 111 fxrstor [xDX + CPUMCPU.Guest.fpu] 112 113 %ifdef VBOX_WITH_KERNEL_USING_XMM 114 ; Restore the non-volatile xmm registers. ASSUMING 64-bit windows 115 lea r11, [xDX + CPUMCPU.Host.fpu + XMM_OFF_IN_X86FXSTATE] 116 movdqa xmm6, [r11 + 060h] 117 movdqa xmm7, [r11 + 070h] 118 movdqa xmm8, [r11 + 080h] 119 movdqa xmm9, [r11 + 090h] 120 movdqa xmm10, [r11 + 0a0h] 121 movdqa xmm11, [r11 + 0b0h] 122 movdqa xmm12, [r11 + 0c0h] 123 movdqa xmm13, [r11 + 0d0h] 124 movdqa xmm14, [r11 + 0e0h] 125 movdqa xmm15, [r11 + 0f0h] 126 %endif 105 127 106 128 .done: -
trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm
r18851 r20997 64 64 %endif 65 65 %endif 66 67 ;; The offset of the XMM registers in X86FXSTATE. 68 ; Use define because I'm too lazy to convert the struct. 69 %define XMM_OFF_IN_X86FXSTATE 160 66 70 67 71 … … 249 253 extern NAME(SUPR0Abs64bitKernelDS) 250 254 extern NAME(SUPR0AbsKernelCS) 255 %endif 256 %ifdef VBOX_WITH_KERNEL_USING_XMM 257 extern NAME(CPUMIsGuestFPUStateActive) 251 258 %endif 252 259 … … 1020 1027 1021 1028 1029 %ifdef VBOX_WITH_KERNEL_USING_XMM 1030 ;; 1031 ; Wrapper around vmx.pfnStartVM that preserves host XMM registers and 1032 ; load the guest ones when necessary. 1033 ; 1034 ; @cproto DECLASM(int) hwaccmR0VMXStartVMWrapperXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM); 1035 ; 1036 ; @returns eax 1037 ; 1038 ; @param fResumeVM msc:rcx 1039 ; @param pCtx msc:rdx 1040 ; @param pVMCSCache msc:r8 1041 ; @param pVM msc:r9 1042 ; @param pVCpu msc:[rbp+30h] 1043 ; @param pfnStartVM msc:[rbp+38h] 1044 ; 1045 ; ASSUMING 64-bit and windows for now. 1046 ALIGNCODE(16) 1047 BEGINPROC hwaccmR0VMXStartVMWrapperXMM 1048 push xBP 1049 mov xBP, xSP 1050 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size. 1051 1052 ; spill input parameters. 1053 mov [xBP + 010h], rcx ; fResumeVM 1054 mov [xBP + 018h], rdx ; pCtx 1055 mov [xBP + 020h], r8 ; pVMCSCache 1056 mov [xBP + 028h], r9 ; pVM 1057 1058 ; Ask CPUM whether we've started using the FPU yet. 1059 mov rcx, [xBP + 30h] ; pVCpu 1060 call NAME(CPUMIsGuestFPUStateActive) 1061 test al, al 1062 jnz .guest_fpu_state_active 1063 1064 ; No need to mess with XMM registers just call the start routine and return. 
1065 mov r11, [xBP + 38h] ; pfnStartVM 1066 mov r10, [xBP + 30h] ; pVCpu 1067 mov [xSP + 020h], r10 1068 mov rcx, [xBP + 010h] ; fResumeVM 1069 mov rdx, [xBP + 018h] ; pCtx 1070 mov r8, [xBP + 020h] ; pVMCSCache 1071 mov r9, [xBP + 028h] ; pVM 1072 call r11 1073 1074 leave 1075 ret 1076 1077 ALIGNCODE(8) 1078 .guest_fpu_state_active: 1079 ; Save the host XMM registers. 1080 movdqa [rsp + 040h + 000h], xmm6 1081 movdqa [rsp + 040h + 010h], xmm7 1082 movdqa [rsp + 040h + 020h], xmm8 1083 movdqa [rsp + 040h + 030h], xmm9 1084 movdqa [rsp + 040h + 040h], xmm10 1085 movdqa [rsp + 040h + 050h], xmm11 1086 movdqa [rsp + 040h + 060h], xmm12 1087 movdqa [rsp + 040h + 070h], xmm13 1088 movdqa [rsp + 040h + 080h], xmm14 1089 movdqa [rsp + 040h + 090h], xmm15 1090 1091 ; Load the full guest XMM register state. 1092 mov r10, [xBP + 018h] ; pCtx 1093 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE] 1094 movdqa xmm0, [r10 + 000h] 1095 movdqa xmm1, [r10 + 010h] 1096 movdqa xmm2, [r10 + 020h] 1097 movdqa xmm3, [r10 + 030h] 1098 movdqa xmm4, [r10 + 040h] 1099 movdqa xmm5, [r10 + 050h] 1100 movdqa xmm6, [r10 + 060h] 1101 movdqa xmm7, [r10 + 070h] 1102 movdqa xmm8, [r10 + 080h] 1103 movdqa xmm9, [r10 + 090h] 1104 movdqa xmm10, [r10 + 0a0h] 1105 movdqa xmm11, [r10 + 0b0h] 1106 movdqa xmm12, [r10 + 0c0h] 1107 movdqa xmm13, [r10 + 0d0h] 1108 movdqa xmm14, [r10 + 0e0h] 1109 movdqa xmm15, [r10 + 0f0h] 1110 1111 ; Make the call (same as in the other case ). 1112 mov r11, [xBP + 38h] ; pfnStartVM 1113 mov r10, [xBP + 30h] ; pVCpu 1114 mov [xSP + 020h], r10 1115 mov rcx, [xBP + 010h] ; fResumeVM 1116 mov rdx, [xBP + 018h] ; pCtx 1117 mov r8, [xBP + 020h] ; pVMCSCache 1118 mov r9, [xBP + 028h] ; pVM 1119 call r11 1120 1121 ; Save the guest XMM registers. 
1122 mov r10, [xBP + 018h] ; pCtx 1123 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE] 1124 movdqa [r10 + 000h], xmm0 1125 movdqa [r10 + 010h], xmm1 1126 movdqa [r10 + 020h], xmm2 1127 movdqa [r10 + 030h], xmm3 1128 movdqa [r10 + 040h], xmm4 1129 movdqa [r10 + 050h], xmm5 1130 movdqa [r10 + 060h], xmm6 1131 movdqa [r10 + 070h], xmm7 1132 movdqa [r10 + 080h], xmm8 1133 movdqa [r10 + 090h], xmm9 1134 movdqa [r10 + 0a0h], xmm10 1135 movdqa [r10 + 0b0h], xmm11 1136 movdqa [r10 + 0c0h], xmm12 1137 movdqa [r10 + 0d0h], xmm13 1138 movdqa [r10 + 0e0h], xmm14 1139 movdqa [r10 + 0f0h], xmm15 1140 1141 ; Load the host XMM registers. 1142 movdqa xmm6, [rsp + 040h + 000h] 1143 movdqa xmm7, [rsp + 040h + 010h] 1144 movdqa xmm8, [rsp + 040h + 020h] 1145 movdqa xmm9, [rsp + 040h + 030h] 1146 movdqa xmm10, [rsp + 040h + 040h] 1147 movdqa xmm11, [rsp + 040h + 050h] 1148 movdqa xmm12, [rsp + 040h + 060h] 1149 movdqa xmm13, [rsp + 040h + 070h] 1150 movdqa xmm14, [rsp + 040h + 080h] 1151 movdqa xmm15, [rsp + 040h + 090h] 1152 leave 1153 ret 1154 ENDPROC hwaccmR0VMXStartVMWrapperXMM 1155 %endif 1022 1156 1023 1157 ; -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r20981 r20997 2334 2334 2335 2335 TMNotifyStartOfExecution(pVCpu); 2336 #ifdef VBOX_WITH_KERNEL_USING_XMM 2337 rc = hwaccmR0VMXStartVMWrapperXMM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hwaccm.s.vmx.pfnStartVM); 2338 #else 2336 2339 rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu); 2340 #endif 2337 2341 TMNotifyEndOfExecution(pVCpu); 2338 2342 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
Note: See TracChangeset for help on using the changeset viewer.