Changeset 13960 in vbox for trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
- Timestamp: Nov 7, 2008 1:04:45 PM (16 years ago)
- File: trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp (1 edited)
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp (r12989)
+++ trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp (r13960)
@@ -82,8 +82,10 @@
          * Read the MSR and see if it's in use or not.
          */
-        uint32_t    u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
+        uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
         if (u32)
         {
-            pVM->cpum.s.fUseFlags |= CPUM_USE_SYSENTER;
+            for (unsigned i=0;i<pVM->cCPUs;i++)
+                pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_SYSENTER;
+
             Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
         }
@@ -101,5 +103,6 @@
     if (u32DR7 & X86_DR7_ENABLED_MASK)
     {
-        pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
+        for (unsigned i=0;i<pVM->cCPUs;i++)
+            pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
         Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
     }
@@ -114,7 +117,8 @@
  * @returns VBox status code.
  * @param   pVM         VM handle.
+ * @param   pVCpu       VMCPU handle.
  * @param   pCtx        CPU context
  */
-VMMR0DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PCPUMCTX pCtx)
+VMMR0DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
@@ -122,5 +126,5 @@
     /* If the FPU state has already been loaded, then it's a guest trap. */
 
-    if (pVM->cpum.s.fUseFlags & CPUM_USED_FPU)
+    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU)
     {
         Assert(    ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
@@ -177,15 +181,15 @@
         {
             ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
-            pVM->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
+            pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
         }
     }
 
     /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
-    int rc = CPUMHandleLazyFPU(pVM);
+    int rc = CPUMHandleLazyFPU(pVM, pVCpu);
     AssertRC(rc);
-    Assert(CPUMIsGuestFPUStateActive(pVM));
+    Assert(CPUMIsGuestFPUStateActive(pVCpu));
 
     /* Restore EFER MSR */
-    if (pVM->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
+    if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
         ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);
 
@@ -199,7 +203,7 @@
      * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
      */
-    pVM->cpum.s.Host.fpu.FCW = CPUMGetFCW();
+    pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
     if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
-        pVM->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();
+        pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();
 
     CPUMLoadFPUAsm(pCtx);
@@ -219,10 +223,10 @@
         /* fxrstor doesn't restore the XMM state! */
         CPUMLoadXMMAsm(pCtx);
-        pVM->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
+        pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
         }
     }
 #endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
 
-    pVM->cpum.s.fUseFlags |= CPUM_USED_FPU;
+    pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU;
     return VINF_SUCCESS;
 }
@@ -234,11 +238,12 @@
  * @returns VBox status code.
  * @param   pVM         VM handle.
+ * @param   pVCpu       VMCPU handle.
  * @param   pCtx        CPU context
  */
-VMMR0DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PCPUMCTX pCtx)
+VMMR0DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
     Assert(ASMGetCR4() & X86_CR4_OSFSXR);
-    AssertReturn((pVM->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
+    AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
 
 #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
@@ -246,18 +251,18 @@
 
     /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
-    if (pVM->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
+    if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
     {
         oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
         ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
     }
-    CPUMRestoreHostFPUState(pVM);
+    CPUMRestoreHostFPUState(pVM, pVCpu);
 
     /* Restore EFER MSR */
-    if (pVM->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
+    if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
         ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);
 
 #else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
     CPUMSaveFPUAsm(pCtx);
-    if (pVM->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
+    if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
     {
         /* fxsave doesn't save the XMM state! */
@@ -269,10 +274,10 @@
      * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
      */
-    CPUMSetFCW(pVM->cpum.s.Host.fpu.FCW);
+    CPUMSetFCW(pVCpu->cpum.s.Host.fpu.FCW);
     if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
-        CPUMSetMXCSR(pVM->cpum.s.Host.fpu.MXCSR);
+        CPUMSetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
 #endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
 
-    pVM->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_MANUAL_XMM_RESTORE);
+    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_MANUAL_XMM_RESTORE);
     return VINF_SUCCESS;
 }
@@ -284,10 +289,11 @@
  * @returns VBox status code.
  * @param   pVM         VM handle.
+ * @param   pVCpu       VMCPU handle.
  * @param   pCtx        CPU context
  * @param   fDR6        Include DR6 or not
  */
-VMMR0DECL(int) CPUMR0SaveGuestDebugState(PVM pVM, PCPUMCTX pCtx, bool fDR6)
-{
-    Assert(pVM->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS);
+VMMR0DECL(int) CPUMR0SaveGuestDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
+{
+    Assert(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS);
 
     /* Save the guest's debug state. The caller is responsible for DR7. */
@@ -303,12 +309,12 @@
      * DR7 contains 0x400 right now.
      */
-    ASMSetDR0(pVM->cpum.s.Host.dr0);
-    ASMSetDR1(pVM->cpum.s.Host.dr1);
-    ASMSetDR2(pVM->cpum.s.Host.dr2);
-    ASMSetDR3(pVM->cpum.s.Host.dr3);
-    ASMSetDR6(pVM->cpum.s.Host.dr6);
-    ASMSetDR7(pVM->cpum.s.Host.dr7);
-
-    pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
+    ASMSetDR0(pVCpu->cpum.s.Host.dr0);
+    ASMSetDR1(pVCpu->cpum.s.Host.dr1);
+    ASMSetDR2(pVCpu->cpum.s.Host.dr2);
+    ASMSetDR3(pVCpu->cpum.s.Host.dr3);
+    ASMSetDR6(pVCpu->cpum.s.Host.dr6);
+    ASMSetDR7(pVCpu->cpum.s.Host.dr7);
+
+    pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
     return VINF_SUCCESS;
 }
@@ -320,17 +326,18 @@
  * @returns VBox status code.
  * @param   pVM         VM handle.
+ * @param   pVCpu       VMCPU handle.
  * @param   pCtx        CPU context
  * @param   fDR6        Include DR6 or not
  */
-VMMR0DECL(int) CPUMR0LoadGuestDebugState(PVM pVM, PCPUMCTX pCtx, bool fDR6)
+VMMR0DECL(int) CPUMR0LoadGuestDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
 {
     /* Save the host state. */
-    pVM->cpum.s.Host.dr0 = ASMGetDR0();
-    pVM->cpum.s.Host.dr1 = ASMGetDR1();
-    pVM->cpum.s.Host.dr2 = ASMGetDR2();
-    pVM->cpum.s.Host.dr3 = ASMGetDR3();
-    pVM->cpum.s.Host.dr6 = ASMGetDR6();
+    pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
+    pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
+    pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
+    pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
+    pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
     /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
-    pVM->cpum.s.Host.dr7 = ASMGetDR7();
+    pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
     /* Make sure DR7 is harmless or else we could trigger breakpoints when restoring dr0-3 (!) */
     ASMSetDR7(X86_DR7_INIT_VAL);
@@ -344,5 +351,5 @@
     ASMSetDR6(pCtx->dr[6]);
 
-    pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
+    pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
     return VINF_SUCCESS;
 }
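For context on the API change above: after this revision the ring-0 FPU helpers take the VMCPU handle in addition to the VM handle, and the lazy-FPU usage flags live in pVCpu->cpum.s rather than pVM->cpum.s. The sketch below is not part of the changeset; it only illustrates how a hypothetical ring-0 caller would drive the new signatures. The header paths and the surrounding world-switch logic are assumptions; only the CPUMR0* signatures and flag semantics come from the diff above.

    /* Illustrative sketch only (not from the changeset): shows the per-VCPU
     * calling convention introduced by r13960. Header paths and this caller
     * are assumptions; the CPUMR0* signatures match the diff above. */
    #include <VBox/cpum.h>      /* assumed location of the CPUM ring-0 API */
    #include <VBox/vm.h>        /* assumed location of PVM / PVMCPU */
    #include <iprt/assert.h>

    static int exampleRunWithGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        /* Lazily activate the guest FPU/XMM state for this virtual CPU; the
           CPUM_USED_FPU bookkeeping is now tracked per VCPU, not per VM. */
        int rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
        AssertRCReturn(rc, rc);
        Assert(CPUMIsGuestFPUStateActive(pVCpu));

        /* ... execute guest code here ... */

        /* Write the guest FPU/XMM state back and restore the host state; this
           returns VINF_SUCCESS immediately if CPUM_USED_FPU was never set. */
        return CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
    }

The debug-register helpers CPUMR0LoadGuestDebugState and CPUMR0SaveGuestDebugState gain the same extra parameter and follow the same per-VCPU pattern.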