Changeset 55048 in vbox

- Timestamp: Mar 31, 2015 6:49:19 PM
- svn:sync-xref-src-repo-rev: 99337
- Location: trunk
- Files: 24 edited
trunk/include/VBox/vmm/cpum.mac (r55004 → r55048)

     struc CPUMCTX
-        .XState             resb    XSTATE_SIZE
         .eax                resq    1
         .ecx                resq    1
…
         .msrKERNELGSBASE    resb    8
         .msrApicBase        resb    8
-        .au32SizePadding    resb    24
+        alignb 8
+        .pXStateR0          RTR0PTR_RES     1
+        .pXStateR3          RTR3PTR_RES     1
+        .pXStateRC          RTRCPTR_RES     1
+        alignb 64
     endstruc
trunk/include/VBox/vmm/cpumctx.h (r55004 → r55048)

 typedef struct CPUMCTX
 {
-    /** FPU state. (16-byte alignment)
-     * @todo This doesn't have to be in X86FXSTATE on CPUs without fxsr - we need a type for the
-     *       actual format or convert it (waste of time).  */
-    X86XSAVEAREA    XState;
-
     /** CPUMCTXCORE Part.
      * @{ */
…
     /** @} */

+    /** Pointer to the FPU/SSE/AVX/XXXX state ring-0 mapping. */
+    R0PTRTYPE(PX86XSAVEAREA)    pXStateR0;
+    /** Pointer to the FPU/SSE/AVX/XXXX state ring-3 mapping. */
+    R3PTRTYPE(PX86XSAVEAREA)    pXStateR3;
+    /** Pointer to the FPU/SSE/AVX/XXXX state raw-mode mapping. */
+    RCPTRTYPE(PX86XSAVEAREA)    pXStateRC;
+
     /** Size padding. */
-    uint32_t        au32SizePadding[6];
+    uint32_t        au32SizePadding[HC_ARCH_BITS == 32 ? 3 : 1];
 } CPUMCTX;
 #pragma pack()
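The net effect of the two changes above is that the guest's XSAVE area no longer lives inside CPUMCTX; the structure instead carries one pointer per execution context (ring-3, ring-0, raw-mode) to a separately allocated area. Below is a minimal sketch of the pointer-selection idea, assuming a CTX_SUFF-style suffix-pasting macro like the one VBox defines in iprt/cdefs.h (the real definition differs in detail):

    /* Sketch only: pick the XSAVE-area pointer valid in the context this
     * translation unit is compiled for, by pasting the matching suffix. */
    #if defined(IN_RING0)
    # define CTX_SUFF(a_Name)   a_Name##R0      /* pXState -> pXStateR0 */
    #elif defined(IN_RC)
    # define CTX_SUFF(a_Name)   a_Name##RC      /* pXState -> pXStateRC */
    #else
    # define CTX_SUFF(a_Name)   a_Name##R3      /* pXState -> pXStateR3 */
    #endif

    /* Old code dereferenced the embedded member:      pCtx->XState.x87.FSW
     * New code goes through the per-context mapping:  pCtx->CTX_SUFF(pXState)->x87.FSW */

This is why the IEMAll.cpp rework below replaces every `pCtx->XState.x87.*` access with either `pCtx->CTX_SUFF(pXState)->x87.*` or a cached `pFpuCtx` pointer.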
trunk/include/VBox/vmm/ssm.h (r50575 → r55048)

 /** Saved using SSMR3PutMem, don't be too strict. */
 #define SSMSTRUCT_FLAGS_SAVED_AS_MEM        RT_BIT_32(3)
+/** No introductory structure marker.  Use when splitting up structures. */
+#define SSMSTRUCT_FLAGS_NO_LEAD_MARKER      RT_BIT_32(4)
+/** No trailing structure marker.  Use when splitting up structures. */
+#define SSMSTRUCT_FLAGS_NO_TAIL_MARKER      RT_BIT_32(5)
 /** Band-aid for old SSMR3PutMem/SSMR3GetMem of structurs with host pointers.
  * @remarks This type is normally only used up to the first changes to the
…
           | SSMSTRUCT_FLAGS_NO_MARKERS | SSMSTRUCT_FLAGS_SAVED_AS_MEM)
 /** Mask of the valid bits. */
-#define SSMSTRUCT_FLAGS_VALID_MASK          UINT32_C(0x0000000f)
+#define SSMSTRUCT_FLAGS_VALID_MASK          UINT32_C(0x0000003f)
 /** @} */
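The two new flags exist so that one logical saved-state structure can be written in several passes now that the XSAVE area is detached from CPUMCTX. A hypothetical illustration (the field tables g_aCpumCtxFields/g_aCpumXStateFields are made-up names, and the SSMR3PutStructEx signature is assumed from ssm.h):

    /* Part 1: the CPUMCTX core -- keep the lead marker, suppress the tail one. */
    int rc = SSMR3PutStructEx(pSSM, pCtx, sizeof(*pCtx),
                              SSMSTRUCT_FLAGS_NO_TAIL_MARKER, g_aCpumCtxFields, NULL /*pvUser*/);
    /* Part 2: the detached XSAVE area, completing the same logical structure;
     * suppress the lead marker so the stream still looks like a single struct. */
    if (RT_SUCCESS(rc))
        rc = SSMR3PutStructEx(pSSM, pCtx->pXStateR3, sizeof(*pCtx->pXStateR3),
                              SSMSTRUCT_FLAGS_NO_LEAD_MARKER, g_aCpumXStateFields, NULL /*pvUser*/);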
trunk/include/VBox/vmm/vm.h (r55034 → r55048)

         struct CPUMCPU      s;
 #endif
-        uint8_t             padding[28672];     /* multiple of 4096 */
+        uint8_t             padding[4096];      /* multiple of 4096 */
     } cpum;
trunk/include/VBox/vmm/vm.mac (r54898 → r55048)

     .pgm    resb 4096
     alignb 4096
-    .cpum   resb 28672
+    .cpum   resb 4096
     alignb 4096
 endstruc
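The 4096-byte figure has to agree between the C header (vm.h) and the assembler include (vm.mac), and the real CPUMCPU must still fit inside the reservation now that the XSAVE area is no longer embedded. A sketch of the kind of compile-time guard that catches a mismatch (the exact assertions VBox uses may differ):

    /* Fails to compile if CPUMCPU outgrows the padding reserved in VMCPU,
     * or if the reservation is not the 4096 bytes vm.mac assumes. */
    AssertCompile(sizeof(((VMCPU *)0)->cpum) == 4096);
    AssertCompile(sizeof(struct CPUMCPU) <= 4096);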
trunk/include/iprt/x86.h (r54898 → r55048)

 #define X86_OFF_FXSTATE_RSVD            0x1d0
 /** The 32-bit magic used to recognize if this a 32-bit FPU state. Don't
- * forget to update x86.mac if you change this! */
+ * forget to update x86.mac if you change this!
+ * @todo r=bird: This has nothing what-so-ever to do here.... */
 #define X86_FXSTATE_RSVD_32BIT_MAGIC    0x32b3232b
 #ifndef VBOX_FOR_DTRACE_LIB
…
 AssertCompileMemberOffset(X86XSAVEAREA, u.Intel.Zmm16Hi, 0x680 /* 1664 */);
 #endif
+/** Pointer to a XSAVE area. */
+typedef X86XSAVEAREA *PX86XSAVEAREA;
+/** Pointer to a const XSAVE area. */
+typedef X86XSAVEAREA const *PCX86XSAVEAREA;
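The new pointer typedefs let helper code take just the XSAVE area instead of a whole CPUMCTX, which is exactly what the IEMAll.cpp rework below does with PX86FXSTATE for the legacy FXSAVE region. A small illustrative helper, not part of the changeset:

    /* Clear the C1 condition flag in the x87 status word, given only the
     * XSAVE area; x87 is the legacy FXSAVE-format region at its start. */
    static void fpuClearC1(PX86XSAVEAREA pXState)
    {
        PX86FXSTATE pFpuCtx = &pXState->x87;
        pFpuCtx->FSW &= ~X86_FSW_C1;
    }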
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r55000 → r55048)

  * @param   pIemCpu             The IEM per CPU data.
  * @param   pCtx                The CPU context.
- */
-DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
-{
-    pCtx->XState.x87.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
-                         | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
-    /** @todo XState.x87.CS and FPUIP needs to be kept seperately. */
+ * @param   pFpuCtx             The FPU context.
+ */
+DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
+{
+    pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
+                 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
+    /** @todo x87.CS and FPUIP needs to be kept seperately. */
     if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
     {
         /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
          *        happens in real mode here based on the fnsave and fnstenv images. */
-        pCtx->XState.x87.CS    = 0;
-        pCtx->XState.x87.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
+        pFpuCtx->CS    = 0;
+        pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
     }
     else
     {
-        pCtx->XState.x87.CS    = pCtx->cs.Sel;
-        pCtx->XState.x87.FPUIP = pCtx->rip;
+        pFpuCtx->CS    = pCtx->cs.Sel;
+        pFpuCtx->FPUIP = pCtx->rip;
     }
 }


 /**
- * Updates the XState.x87.DS and FPUDP registers.
+ * Updates the x87.DS and FPUDP registers.
  *
  * @param   pIemCpu             The IEM per CPU data.
  * @param   pCtx                The CPU context.
+ * @param   pFpuCtx             The FPU context.
  * @param   iEffSeg             The effective segment register.
  * @param   GCPtrEff            The effective address relative to @a iEffSeg.
  */
-DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
+DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
 {
     RTSEL sel;
…
         sel = pCtx->ds.Sel;
     }
-    /** @todo XState.x87.DS and FPUDP needs to be kept seperately. */
+    /** @todo pFpuCtx->DS and FPUDP needs to be kept seperately. */
     if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
     {
-        pCtx->XState.x87.DS    = 0;
-        pCtx->XState.x87.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
+        pFpuCtx->DS    = 0;
+        pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
     }
     else
     {
-        pCtx->XState.x87.DS    = sel;
-        pCtx->XState.x87.FPUDP = GCPtrEff;
+        pFpuCtx->DS    = sel;
+        pFpuCtx->FPUDP = GCPtrEff;
     }
 }


 /**
  * Rotates the stack registers in the push direction.
  *
- * @param   pCtx                The CPU context.
+ * @param   pFpuCtx             The FPU context.
  * @remarks This is a complete waste of time, but fxsave stores the registers in
  *          stack order.
  */
-DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
-{
-    RTFLOAT80U r80Tmp = pCtx->XState.x87.aRegs[7].r80;
-    pCtx->XState.x87.aRegs[7].r80 = pCtx->XState.x87.aRegs[6].r80;
-    pCtx->XState.x87.aRegs[6].r80 = pCtx->XState.x87.aRegs[5].r80;
-    pCtx->XState.x87.aRegs[5].r80 = pCtx->XState.x87.aRegs[4].r80;
-    pCtx->XState.x87.aRegs[4].r80 = pCtx->XState.x87.aRegs[3].r80;
-    pCtx->XState.x87.aRegs[3].r80 = pCtx->XState.x87.aRegs[2].r80;
-    pCtx->XState.x87.aRegs[2].r80 = pCtx->XState.x87.aRegs[1].r80;
-    pCtx->XState.x87.aRegs[1].r80 = pCtx->XState.x87.aRegs[0].r80;
-    pCtx->XState.x87.aRegs[0].r80 = r80Tmp;
+DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
+{
+    RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
+    pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
+    pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
+    pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
+    pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
+    pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
+    pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
+    pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
+    pFpuCtx->aRegs[0].r80 = r80Tmp;
 }


 /**
  * Rotates the stack registers in the pop direction.
  *
- * @param   pCtx                The CPU context.
+ * @param   pFpuCtx             The FPU context.
  * @remarks This is a complete waste of time, but fxsave stores the registers in
  *          stack order.
  */
-DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
-{
-    RTFLOAT80U r80Tmp = pCtx->XState.x87.aRegs[0].r80;
-    pCtx->XState.x87.aRegs[0].r80 = pCtx->XState.x87.aRegs[1].r80;
-    pCtx->XState.x87.aRegs[1].r80 = pCtx->XState.x87.aRegs[2].r80;
-    pCtx->XState.x87.aRegs[2].r80 = pCtx->XState.x87.aRegs[3].r80;
-    pCtx->XState.x87.aRegs[3].r80 = pCtx->XState.x87.aRegs[4].r80;
-    pCtx->XState.x87.aRegs[4].r80 = pCtx->XState.x87.aRegs[5].r80;
-    pCtx->XState.x87.aRegs[5].r80 = pCtx->XState.x87.aRegs[6].r80;
-    pCtx->XState.x87.aRegs[6].r80 = pCtx->XState.x87.aRegs[7].r80;
-    pCtx->XState.x87.aRegs[7].r80 = r80Tmp;
+DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
+{
+    RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
+    pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
+    pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
+    pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
+    pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
+    pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
+    pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
+    pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
+    pFpuCtx->aRegs[7].r80 = r80Tmp;
 }


 /**
  * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
  *
  * @param   pIemCpu             The IEM per CPU data.
  * @param   pResult             The FPU operation result to push.
- * @param   pCtx                The CPU context.
- */
-static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
+ * @param   pFpuCtx             The FPU context.
+ */
+static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
 {
     /* Update FSW and bail if there are pending exceptions afterwards. */
-    uint16_t fFsw = pCtx->XState.x87.FSW & ~X86_FSW_C_MASK;
+    uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
     fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
-    if (   (fFsw                     & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
-        & ~(pCtx->XState.x87.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
-    {
-        pCtx->XState.x87.FSW = fFsw;
+    if (   (fFsw         & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
+        & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
+    {
+        pFpuCtx->FSW = fFsw;
         return;
     }

     uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
-    if (!(pCtx->XState.x87.FTW & RT_BIT(iNewTop)))
+    if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
     {
         /* All is fine, push the actual value. */
-        pCtx->XState.x87.FTW |= RT_BIT(iNewTop);
-        pCtx->XState.x87.aRegs[7].r80 = pResult->r80Result;
-    }
-    else if (pCtx->XState.x87.FCW & X86_FCW_IM)
+        pFpuCtx->FTW |= RT_BIT(iNewTop);
+        pFpuCtx->aRegs[7].r80 = pResult->r80Result;
+    }
+    else if (pFpuCtx->FCW & X86_FCW_IM)
     {
         /* Masked stack overflow, push QNaN. */
         fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
-        iemFpuStoreQNan(&pCtx->XState.x87.aRegs[7].r80);
+        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
     }
     else
     {
         /* Raise stack overflow, don't push anything. */
-        pCtx->XState.x87.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
-        pCtx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
+        pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
+        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
         return;
     }

     fFsw &= ~X86_FSW_TOP_MASK;
     fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
-    pCtx->XState.x87.FSW = fFsw;
-
-    iemFpuRotateStackPush(pCtx);
+    pFpuCtx->FSW = fFsw;
+
+    iemFpuRotateStackPush(pFpuCtx);
 }


 /**
  * Stores a result in a FPU register and updates the FSW and FTW.
+ *
+ * @param   pFpuCtx             The FPU context.
+ * @param   pResult             The result to store.
+ * @param   iStReg              Which FPU register to store it in.
+ */
+static void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
+{
+    Assert(iStReg < 8);
+    uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
+    pFpuCtx->FSW &= ~X86_FSW_C_MASK;
+    pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
+    pFpuCtx->FTW |= RT_BIT(iReg);
+    pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
+}
+
+
+/**
+ * Only updates the FPU status word (FSW) with the result of the current
+ * instruction.
+ *
+ * @param   pFpuCtx             The FPU context.
+ * @param   u16FSW              The FSW output of the current instruction.
+ */
+static void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
+{
+    pFpuCtx->FSW &= ~X86_FSW_C_MASK;
+    pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
+}
+
+
+/**
+ * Pops one item off the FPU stack if no pending exception prevents it.
+ *
+ * @param   pFpuCtx             The FPU context.
+ */
+static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
+{
+    /* Check pending exceptions. */
+    uint16_t uFSW = pFpuCtx->FSW;
+    if (   (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
+        & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
+        return;
+
+    /* TOP--. */
+    uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
+    uFSW &= ~X86_FSW_TOP_MASK;
+    uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
+    pFpuCtx->FSW = uFSW;
+
+    /* Mark the previous ST0 as empty. */
+    iOldTop >>= X86_FSW_TOP_SHIFT;
+    pFpuCtx->FTW &= ~RT_BIT(iOldTop);
+
+    /* Rotate the registers. */
+    iemFpuRotateStackPop(pFpuCtx);
+}
+
+
+/**
+ * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pResult             The FPU operation result to push.
+ */
+static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
+{
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
+}
+
+
+/**
+ * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
+ * and sets FPUDP and FPUDS.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pResult             The FPU operation result to push.
+ * @param   iEffSeg             The effective segment register.
+ * @param   GCPtrEff            The effective address relative to @a iEffSeg.
+ */
+static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
+{
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
+}
+
+
+/**
+ * Replace ST0 with the first value and push the second onto the FPU stack,
+ * unless a pending exception prevents it.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pResult             The FPU operation result to store and push.
+ */
+static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
+{
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+
+    /* Update FSW and bail if there are pending exceptions afterwards. */
+    uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
+    fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
+    if (   (fFsw         & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
+        & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
+    {
+        pFpuCtx->FSW = fFsw;
+        return;
+    }
+
+    uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
+    if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
+    {
+        /* All is fine, push the actual value. */
+        pFpuCtx->FTW |= RT_BIT(iNewTop);
+        pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
+        pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
+    }
+    else if (pFpuCtx->FCW & X86_FCW_IM)
+    {
+        /* Masked stack overflow, push QNaN. */
+        fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
+        iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
+        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
+    }
+    else
+    {
+        /* Raise stack overflow, don't push anything. */
+        pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
+        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
+        return;
+    }
+
+    fFsw &= ~X86_FSW_TOP_MASK;
+    fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
+    pFpuCtx->FSW = fFsw;
+
+    iemFpuRotateStackPush(pFpuCtx);
+}
+
+
+/**
+ * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
+ * FOP.
  *
  * @param   pIemCpu             The IEM per CPU data.
…
  * @param   pCtx                The CPU context.
  */
-static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
-{
-    Assert(iStReg < 8);
-    uint16_t iReg = (X86_FSW_TOP_GET(pCtx->XState.x87.FSW) + iStReg) & X86_FSW_TOP_SMASK;
-    pCtx->XState.x87.FSW &= ~X86_FSW_C_MASK;
-    pCtx->XState.x87.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
-    pCtx->XState.x87.FTW |= RT_BIT(iReg);
-    pCtx->XState.x87.aRegs[iStReg].r80 = pResult->r80Result;
-}
-
-
-/**
- * Only updates the FPU status word (FSW) with the result of the current
- * instruction.
- *
- * @param   pCtx                The CPU context.
- * @param   u16FSW              The FSW output of the current instruction.
- */
-static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
-{
-    pCtx->XState.x87.FSW &= ~X86_FSW_C_MASK;
-    pCtx->XState.x87.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
-}
-
-
-/**
- * Pops one item off the FPU stack if no pending exception prevents it.
- *
- * @param   pCtx                The CPU context.
- */
-static void iemFpuMaybePopOne(PCPUMCTX pCtx)
-{
-    /* Check pending exceptions. */
-    uint16_t uFSW = pCtx->XState.x87.FSW;
-    if (   (pCtx->XState.x87.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
-        & ~(pCtx->XState.x87.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
-        return;
-
-    /* TOP--. */
-    uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
-    uFSW &= ~X86_FSW_TOP_MASK;
-    uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
-    pCtx->XState.x87.FSW = uFSW;
-
-    /* Mark the previous ST0 as empty. */
-    iOldTop >>= X86_FSW_TOP_SHIFT;
-    pCtx->XState.x87.FTW &= ~RT_BIT(iOldTop);
-
-    /* Rotate the registers. */
-    iemFpuRotateStackPop(pCtx);
-}
-
-
-/**
- * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
- *
- * @param   pIemCpu             The IEM per CPU data.
- * @param   pResult             The FPU operation result to push.
- */
-static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
-{
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
-}
-
-
-/**
- * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
- * and sets FPUDP and FPUDS.
- *
- * @param   pIemCpu             The IEM per CPU data.
- * @param   pResult             The FPU operation result to push.
- * @param   iEffSeg             The effective segment register.
- * @param   GCPtrEff            The effective address relative to @a iEffSeg.
- */
-static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
-{
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
-}
-
-
-/**
- * Replace ST0 with the first value and push the second onto the FPU stack,
- * unless a pending exception prevents it.
- *
- * @param   pIemCpu             The IEM per CPU data.
- * @param   pResult             The FPU operation result to store and push.
- */
-static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
-{
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-
-    /* Update FSW and bail if there are pending exceptions afterwards. */
-    uint16_t fFsw = pCtx->XState.x87.FSW & ~X86_FSW_C_MASK;
-    fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
-    if (   (fFsw                     & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
-        & ~(pCtx->XState.x87.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
-    {
-        pCtx->XState.x87.FSW = fFsw;
-        return;
-    }
-
-    uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
-    if (!(pCtx->XState.x87.FTW & RT_BIT(iNewTop)))
-    {
-        /* All is fine, push the actual value. */
-        pCtx->XState.x87.FTW |= RT_BIT(iNewTop);
-        pCtx->XState.x87.aRegs[0].r80 = pResult->r80Result1;
-        pCtx->XState.x87.aRegs[7].r80 = pResult->r80Result2;
-    }
-    else if (pCtx->XState.x87.FCW & X86_FCW_IM)
-    {
-        /* Masked stack overflow, push QNaN. */
-        fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
-        iemFpuStoreQNan(&pCtx->XState.x87.aRegs[0].r80);
-        iemFpuStoreQNan(&pCtx->XState.x87.aRegs[7].r80);
-    }
-    else
-    {
-        /* Raise stack overflow, don't push anything. */
-        pCtx->XState.x87.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
-        pCtx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
-        return;
-    }
-
-    fFsw &= ~X86_FSW_TOP_MASK;
-    fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
-    pCtx->XState.x87.FSW = fFsw;
-
-    iemFpuRotateStackPush(pCtx);
+static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
+{
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
 }


 /**
  * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
- * FOP.
+ * FOP, and then pops the stack.
  *
  * @param   pIemCpu             The IEM per CPU data.
…
  * @param   pCtx                The CPU context.
  */
-static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
-{
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
-}
-
-
-/**
- * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
- * FOP, and then pops the stack.
- *
- * @param   pIemCpu             The IEM per CPU data.
- * @param   pResult             The result to store.
- * @param   iStReg              Which FPU register to store it in.
- * @param   pCtx                The CPU context.
- */
 static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
-    iemFpuMaybePopOne(pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
+    iemFpuMaybePopOne(pFpuCtx);
 }

…
 static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
 }

…
                                                  uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
-    iemFpuMaybePopOne(pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
+    iemFpuMaybePopOne(pFpuCtx);
 }

…
 static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
 {
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
 }

…
 {
     Assert(iStReg < 8);
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    uint8_t iReg = (X86_FSW_TOP_GET(pCtx->XState.x87.FSW) + iStReg) & X86_FSW_TOP_SMASK;
-    pCtx->XState.x87.FTW &= ~RT_BIT(iReg);
+    PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
+    uint8_t     iReg    = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
+    pFpuCtx->FTW &= ~RT_BIT(iReg);
 }

…
 static void iemFpuStackIncTop(PIEMCPU pIemCpu)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    uint16_t uFsw = pCtx->XState.x87.FSW;
-    uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
+    PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
+    uint16_t    uFsw    = pFpuCtx->FSW;
+    uint16_t    uTop    = uFsw & X86_FSW_TOP_MASK;
     uTop  = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
     uFsw &= ~X86_FSW_TOP_MASK;
     uFsw |= uTop;
-    pCtx->XState.x87.FSW = uFsw;
+    pFpuCtx->FSW = uFsw;
 }

…
 static void iemFpuStackDecTop(PIEMCPU pIemCpu)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    uint16_t uFsw = pCtx->XState.x87.FSW;
-    uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
+    PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
+    uint16_t    uFsw    = pFpuCtx->FSW;
+    uint16_t    uTop    = uFsw & X86_FSW_TOP_MASK;
     uTop  = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
     uFsw &= ~X86_FSW_TOP_MASK;
     uFsw |= uTop;
-    pCtx->XState.x87.FSW = uFsw;
+    pFpuCtx->FSW = uFsw;
 }

…
 static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuUpdateFSWOnly(pCtx, u16FSW);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
 }

…
 static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuUpdateFSWOnly(pCtx, u16FSW);
-    iemFpuMaybePopOne(pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
+    iemFpuMaybePopOne(pFpuCtx);
 }

…
 static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuUpdateFSWOnly(pCtx, u16FSW);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
 }

…
 static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuUpdateFSWOnly(pCtx, u16FSW);
-    iemFpuMaybePopOne(pCtx);
-    iemFpuMaybePopOne(pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
+    iemFpuMaybePopOne(pFpuCtx);
+    iemFpuMaybePopOne(pFpuCtx);
 }

…
 static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuUpdateFSWOnly(pCtx, u16FSW);
-    iemFpuMaybePopOne(pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
+    iemFpuMaybePopOne(pFpuCtx);
 }

…
  * @param   pIemCpu             The IEM per CPU data.
+ * @param   pFpuCtx             The FPU context.
  * @param   iStReg              The stack register being accessed.
- * @param   pCtx                The CPU context.
- */
-static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
+ */
+static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
 {
     Assert(iStReg < 8 || iStReg == UINT8_MAX);
-    if (pCtx->XState.x87.FCW & X86_FCW_IM)
+    if (pFpuCtx->FCW & X86_FCW_IM)
     {
         /* Masked underflow. */
-        pCtx->XState.x87.FSW &= ~X86_FSW_C_MASK;
-        pCtx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF;
-        uint16_t iReg = (X86_FSW_TOP_GET(pCtx->XState.x87.FSW) + iStReg) & X86_FSW_TOP_SMASK;
+        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
+        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
+        uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
         if (iStReg != UINT8_MAX)
         {
-            pCtx->XState.x87.FTW |= RT_BIT(iReg);
-            iemFpuStoreQNan(&pCtx->XState.x87.aRegs[iStReg].r80);
+            pFpuCtx->FTW |= RT_BIT(iReg);
+            iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
         }
     }
     else
     {
-        pCtx->XState.x87.FSW &= ~X86_FSW_C_MASK;
-        pCtx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
+        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
+        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
     }
 }

…
 DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
 }

…
 iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
 }

…
 DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
-    iemFpuMaybePopOne(pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
+    iemFpuMaybePopOne(pFpuCtx);
 }

…
 iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
-    iemFpuMaybePopOne(pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
+    iemFpuMaybePopOne(pFpuCtx);
 }

…
 DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
-    iemFpuMaybePopOne(pCtx);
-    iemFpuMaybePopOne(pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
+    iemFpuMaybePopOne(pFpuCtx);
+    iemFpuMaybePopOne(pFpuCtx);
 }

…
 iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-
-    if (pCtx->XState.x87.FCW & X86_FCW_IM)
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+
+    if (pFpuCtx->FCW & X86_FCW_IM)
     {
         /* Masked overflow - Push QNaN. */
-        uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->XState.x87.FSW) + 7) & X86_FSW_TOP_SMASK;
-        pCtx->XState.x87.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
-        pCtx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF;
-        pCtx->XState.x87.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
-        pCtx->XState.x87.FTW |= RT_BIT(iNewTop);
-        iemFpuStoreQNan(&pCtx->XState.x87.aRegs[7].r80);
-        iemFpuRotateStackPush(pCtx);
+        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
+        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
+        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
+        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
+        pFpuCtx->FTW |= RT_BIT(iNewTop);
+        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
+        iemFpuRotateStackPush(pFpuCtx);
     }
     else
     {
         /* Exception pending - don't change TOP or the register stack. */
-        pCtx->XState.x87.FSW &= ~X86_FSW_C_MASK;
-        pCtx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
+        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
+        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
     }
 }

…
 iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-
-    if (pCtx->XState.x87.FCW & X86_FCW_IM)
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+
+    if (pFpuCtx->FCW & X86_FCW_IM)
     {
         /* Masked overflow - Push QNaN. */
-        uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->XState.x87.FSW) + 7) & X86_FSW_TOP_SMASK;
-        pCtx->XState.x87.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
-        pCtx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF;
-        pCtx->XState.x87.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
-        pCtx->XState.x87.FTW |= RT_BIT(iNewTop);
-        iemFpuStoreQNan(&pCtx->XState.x87.aRegs[0].r80);
-        iemFpuStoreQNan(&pCtx->XState.x87.aRegs[7].r80);
-        iemFpuRotateStackPush(pCtx);
+        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
+        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
+        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
+        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
+        pFpuCtx->FTW |= RT_BIT(iNewTop);
+        iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
+        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
+        iemFpuRotateStackPush(pFpuCtx);
     }
     else
     {
         /* Exception pending - don't change TOP or the register stack. */
-        pCtx->XState.x87.FSW &= ~X86_FSW_C_MASK;
-        pCtx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
+        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
+        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
     }
 }


 /**
  * Worker routine for raising an FPU stack overflow exception on a push.
  *
+ * @param   pFpuCtx             The FPU context.
+ */
+static void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
+{
+    if (pFpuCtx->FCW & X86_FCW_IM)
+    {
+        /* Masked overflow. */
+        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
+        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
+        pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
+        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
+        pFpuCtx->FTW |= RT_BIT(iNewTop);
+        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
+        iemFpuRotateStackPush(pFpuCtx);
+    }
+    else
+    {
+        /* Exception pending - don't change TOP or the register stack. */
+        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
+        pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
+    }
+}
+
+
+/**
+ * Raises a FPU stack overflow exception on a push.
+ *
  * @param   pIemCpu             The IEM per CPU data.
- * @param   pCtx                The CPU context.
- */
-static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
-{
-    if (pCtx->XState.x87.FCW & X86_FCW_IM)
-    {
-        /* Masked overflow. */
-        uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->XState.x87.FSW) + 7) & X86_FSW_TOP_SMASK;
-        pCtx->XState.x87.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
-        pCtx->XState.x87.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
-        pCtx->XState.x87.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
-        pCtx->XState.x87.FTW |= RT_BIT(iNewTop);
-        iemFpuStoreQNan(&pCtx->XState.x87.aRegs[7].r80);
-        iemFpuRotateStackPush(pCtx);
-    }
-    else
-    {
-        /* Exception pending - don't change TOP or the register stack. */
-        pCtx->XState.x87.FSW &= ~X86_FSW_C_MASK;
-        pCtx->XState.x87.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
-    }
-}
-
-
-/**
- * Raises a FPU stack overflow exception on a push.
- *
- * @param   pIemCpu             The IEM per CPU data.
  */
 DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStackPushOverflowOnly(pFpuCtx);
 }

…
 iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
-    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
-    iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
+    PCPUMCTX    pCtx    = pIemCpu->CTX_SUFF(pCtx);
+    PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
+    iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
+    iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
+    iemFpuStackPushOverflowOnly(pFpuCtx);
 }

…
 static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    uint16_t iReg = (X86_FSW_TOP_GET(pCtx->XState.x87.FSW) + iStReg) & X86_FSW_TOP_SMASK;
-    if (pCtx->XState.x87.FTW & RT_BIT(iReg))
+    PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
+    uint16_t    iReg    = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
+    if (pFpuCtx->FTW & RT_BIT(iReg))
         return VINF_SUCCESS;
     return VERR_NOT_FOUND;
 }

…
 static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    uint16_t iReg = (X86_FSW_TOP_GET(pCtx->XState.x87.FSW) + iStReg) & X86_FSW_TOP_SMASK;
-    if (pCtx->XState.x87.FTW & RT_BIT(iReg))
-    {
-        *ppRef = &pCtx->XState.x87.aRegs[iStReg].r80;
+    PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
+    uint16_t    iReg    = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
+    if (pFpuCtx->FTW & RT_BIT(iReg))
+    {
+        *ppRef = &pFpuCtx->aRegs[iStReg].r80;
         return VINF_SUCCESS;
     }

…
                                          uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    uint16_t iTop  = X86_FSW_TOP_GET(pCtx->XState.x87.FSW);
-    uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
-    uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
-    if ((pCtx->XState.x87.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
-    {
-        *ppRef0 = &pCtx->XState.x87.aRegs[iStReg0].r80;
-        *ppRef1 = &pCtx->XState.x87.aRegs[iStReg1].r80;
+    PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
+    uint16_t    iTop    = X86_FSW_TOP_GET(pFpuCtx->FSW);
+    uint16_t    iReg0   = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
+    uint16_t    iReg1   = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
+    if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
+    {
+        *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
+        *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
         return VINF_SUCCESS;
     }

…
 static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
 {
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-    uint16_t iTop  = X86_FSW_TOP_GET(pCtx->XState.x87.FSW);
-    uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
-    uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
-    if ((pCtx->XState.x87.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
-    {
-        *ppRef0 = &pCtx->XState.x87.aRegs[iStReg0].r80;
+    PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
+    uint16_t    iTop    = X86_FSW_TOP_GET(pFpuCtx->FSW);
+    uint16_t    iReg0   = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
+    uint16_t    iReg1   = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
+    if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
+    {
+        *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
         return VINF_SUCCESS;
     }

…
 /**
  * Updates the FPU exception status after FCW is changed.
  *
- * @param   pCtx                The CPU context.
- */
-static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
-{
-    uint16_t u16Fsw = pCtx->XState.x87.FSW;
-    if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->XState.x87.FCW & X86_FCW_XCPT_MASK))
+ * @param   pFpuCtx             The FPU context.
+ */
+static void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
+{
+    uint16_t u16Fsw = pFpuCtx->FSW;
+    if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
         u16Fsw |= X86_FSW_ES | X86_FSW_B;
     else
         u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
-    pCtx->XState.x87.FSW = u16Fsw;
+    pFpuCtx->FSW = u16Fsw;
 }

…
  * @returns The full FTW.
- * @param   pCtx                The CPU state.
- */
-static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
-{
-    uint8_t const   u8Ftw  = (uint8_t)pCtx->XState.x87.FTW;
+ * @param   pFpuCtx             The FPU context.
+ */
+static uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
+{
+    uint8_t const   u8Ftw  = (uint8_t)pFpuCtx->FTW;
     uint16_t        u16Ftw = 0;
-    unsigned const  iTop   = X86_FSW_TOP_GET(pCtx->XState.x87.FSW);
+    unsigned const  iTop   = X86_FSW_TOP_GET(pFpuCtx->FSW);
     for (unsigned iSt = 0; iSt < 8; iSt++)
     {
…
         {
             uint16_t uTag;
-            PCRTFLOAT80U const pr80Reg = &pCtx->XState.x87.aRegs[iSt].r80;
+            PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
             if (pr80Reg->s.uExponent == 0x7fff)
                 uTag = 2; /* Exponent is all 1's => Special. */

…
     /* The lazy approach for now... */
     /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
-    if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->XState.x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
+    if (   (GCPtrMem & 15)
+        && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
         return iemRaiseGeneralProtectionFault0(pIemCpu);

…
     /* The lazy approach for now... */
-    if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->XState.x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
+    if (   (GCPtrMem & 15)
+        && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
         return iemRaiseGeneralProtectionFault0(pIemCpu);

…
 #define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
     do { \
-        if ((pIemCpu)->CTX_SUFF(pCtx)->XState.x87.FSW & X86_FSW_ES) \
+        if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
             return iemRaiseMathFault(pIemCpu); \
     } while (0)

…
 #define IEM_MC_FETCH_EFLAGS(a_EFlags)               (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
 #define IEM_MC_FETCH_EFLAGS_U8(a_EFlags)            (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
-#define IEM_MC_FETCH_FSW(a_u16Fsw)                  (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->XState.x87.FSW
-#define IEM_MC_FETCH_FCW(a_u16Fcw)                  (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->XState.x87.FCW
+#define IEM_MC_FETCH_FSW(a_u16Fsw)                  (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
+#define IEM_MC_FETCH_FCW(a_u16Fcw)                  (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW

 #define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value)    *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
…
 #define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst)    do { (a_pu32Dst)[1] = 0; } while (0)
 #define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
-    do { pIemCpu->CTX_SUFF(pCtx)->XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
+    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)

 #define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg)       (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
…
 #define IEM_MC_FLIP_EFL_BIT(a_fBit)                 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)

-#define IEM_MC_CLEAR_FSW_EX()   do { (pIemCpu)->CTX_SUFF(pCtx)->XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
+#define IEM_MC_CLEAR_FSW_EX()   do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)


 #define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
-    do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
+    do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
 #define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
-    do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
+    do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
 #define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
-    do { pIemCpu->CTX_SUFF(pCtx)->XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
+    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
 #define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
-    do { pIemCpu->CTX_SUFF(pCtx)->XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
+    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
 #define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
-    (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->XState.x87.aRegs[(a_iMReg)].mmx)
+    (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
 #define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
-    (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->XState.x87.aRegs[(a_iMReg)].mmx)
+    (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
 #define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
-    (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->XState.x87.aRegs[(a_iMReg)].mmx)
+    (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)

 #define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
-    do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].xmm; } while (0)
+    do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
 #define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
-    do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
+    do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
 #define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
-    do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
+    do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
 #define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
-    do { pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
+    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
 #define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
-    do { pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
-         pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
+    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
+         pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
     } while (0)
 #define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
-    do { pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
-         pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
+    do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
+         pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
     } while (0)
 #define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
-    (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].xmm)
+    (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
 #define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
-    (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].xmm)
+    (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
 #define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
-    (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->XState.x87.aXMM[(a_iXReg)].au64[0])
+    (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])

 #define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
…
         if (   !(a_u16FSW & X86_FSW_ES) \
             || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
-                 & ~(pIemCpu->CTX_SUFF(pCtx)->XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
+                 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
             IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
     } while (0)

…
     do { \
         iemFpuPrepareUsage(pIemCpu); \
-        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->XState.x87, (a0)); \
+        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
     } while (0)

…
     do { \
         iemFpuPrepareUsage(pIemCpu); \
-        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->XState.x87, (a0), (a1)); \
+        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
     } while (0)

…
     do { \
         iemFpuPrepareUsage(pIemCpu); \
-        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->XState.x87, (a0), (a1), (a2)); \
+        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
     } while (0)

…
     do { \
         iemFpuPrepareUsage(pIemCpu); \
-        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->XState.x87, (a0), (a1)); \
+        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
     } while (0)

…
     do { \
         iemFpuPrepareUsage(pIemCpu); \
-        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->XState.x87, (a0), (a1), (a2)); \
+        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
     } while (0)

…
     do { \
         iemFpuPrepareUsageSse(pIemCpu); \
-        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->XState.x87, (a0), (a1)); \
+        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
     } while (0)

…
     do { \
         iemFpuPrepareUsageSse(pIemCpu); \
-        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->XState.x87, (a0), (a1), (a2)); \
+        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
     } while (0)

…
     if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
 #define IEM_MC_IF_FCW_IM() \
-    if (pIemCpu->CTX_SUFF(pCtx)->XState.x87.FCW & X86_FCW_IM) {
+    if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {

 #define IEM_MC_ELSE() } else {

…
         } \
     } while (0)
+# define CHECK_XSTATE_FIELD(a_Field) \
+    do \
+    { \
+        if (pOrgXState->a_Field != pDebugXState->a_Field) \
+        { \
+            switch (sizeof(pOrgCtx->a_Field)) \
+            { \
+                case 1: RTAssertMsg2Weak("  %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
+                case 2: RTAssertMsg2Weak("  %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
+                case 4: RTAssertMsg2Weak("  %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
+                case 8: RTAssertMsg2Weak("  %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
+                default: RTAssertMsg2Weak("  %8s differs\n", #a_Field); break; \
+            } \
+            cDiffs++; \
+        } \
+    } while (0)

 # define CHECK_BIT_FIELD(a_Field) \
…
     } while (0)

+    PX86XSAVEAREA pOrgXState   = pOrgCtx->CTX_SUFF(pXState);
+    PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
+
 #if 1 /* The recompiler doesn't update these the intel way. */
     if (fRem)
     {
-        pOrgCtx->XState.x87.FOP        = pDebugCtx->XState.x87.FOP;
-        pOrgCtx->XState.x87.FPUIP      = pDebugCtx->XState.x87.FPUIP;
-        pOrgCtx->XState.x87.CS         = pDebugCtx->XState.x87.CS;
-        pOrgCtx->XState.x87.Rsrvd1     = pDebugCtx->XState.x87.Rsrvd1;
-        pOrgCtx->XState.x87.FPUDP      = pDebugCtx->XState.x87.FPUDP;
-        pOrgCtx->XState.x87.DS         = pDebugCtx->XState.x87.DS;
-        pOrgCtx->XState.x87.Rsrvd2     = pDebugCtx->XState.x87.Rsrvd2;
-        //pOrgCtx->XState.x87.MXCSR_MASK = pDebugCtx->XState.x87.MXCSR_MASK;
-        if ((pOrgCtx->XState.x87.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->XState.x87.FSW & X86_FSW_TOP_MASK))
-            pOrgCtx->XState.x87.FSW = pDebugCtx->XState.x87.FSW;
+        pOrgXState->x87.FOP        = pDebugXState->x87.FOP;
+        pOrgXState->x87.FPUIP      = pDebugXState->x87.FPUIP;
+        pOrgXState->x87.CS         = pDebugXState->x87.CS;
+        pOrgXState->x87.Rsrvd1     = pDebugXState->x87.Rsrvd1;
+        pOrgXState->x87.FPUDP      = pDebugXState->x87.FPUDP;
+        pOrgXState->x87.DS         = pDebugXState->x87.DS;
+        pOrgXState->x87.Rsrvd2     = pDebugXState->x87.Rsrvd2;
+        //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
+        if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
+            pOrgXState->x87.FSW = pDebugXState->x87.FSW;
     }
 #endif
-    if (memcmp(&pOrgCtx->XState.x87, &pDebugCtx->XState.x87, sizeof(pDebugCtx->XState.x87)))
+    if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
     {
         RTAssertMsg2Weak("  the FPU state differs\n");
         cDiffs++;
-        CHECK_FIELD(XState.x87.FCW);
-        CHECK_FIELD(XState.x87.FSW);
-        CHECK_FIELD(XState.x87.FTW);
-        CHECK_FIELD(XState.x87.FOP);
-        CHECK_FIELD(XState.x87.FPUIP);
-        CHECK_FIELD(XState.x87.CS);
-        CHECK_FIELD(XState.x87.Rsrvd1);
-        CHECK_FIELD(XState.x87.FPUDP);
-        CHECK_FIELD(XState.x87.DS);
-        CHECK_FIELD(XState.x87.Rsrvd2);
-        CHECK_FIELD(XState.x87.MXCSR);
-        CHECK_FIELD(XState.x87.MXCSR_MASK);
-        CHECK_FIELD(XState.x87.aRegs[0].au64[0]);  CHECK_FIELD(XState.x87.aRegs[0].au64[1]);
-        CHECK_FIELD(XState.x87.aRegs[1].au64[0]);  CHECK_FIELD(XState.x87.aRegs[1].au64[1]);
-        CHECK_FIELD(XState.x87.aRegs[2].au64[0]);  CHECK_FIELD(XState.x87.aRegs[2].au64[1]);
-        CHECK_FIELD(XState.x87.aRegs[3].au64[0]);  CHECK_FIELD(XState.x87.aRegs[3].au64[1]);
-        CHECK_FIELD(XState.x87.aRegs[4].au64[0]);  CHECK_FIELD(XState.x87.aRegs[4].au64[1]);
-        CHECK_FIELD(XState.x87.aRegs[5].au64[0]);  CHECK_FIELD(XState.x87.aRegs[5].au64[1]);
-        CHECK_FIELD(XState.x87.aRegs[6].au64[0]);  CHECK_FIELD(XState.x87.aRegs[6].au64[1]);
-        CHECK_FIELD(XState.x87.aRegs[7].au64[0]);  CHECK_FIELD(XState.x87.aRegs[7].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 0].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 0].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 1].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 1].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 2].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 2].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 3].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 3].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 4].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 4].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 5].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 5].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 6].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 6].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 7].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 7].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 8].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 8].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[ 9].au64[0]);  CHECK_FIELD(XState.x87.aXMM[ 9].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[10].au64[0]);  CHECK_FIELD(XState.x87.aXMM[10].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[11].au64[0]);  CHECK_FIELD(XState.x87.aXMM[11].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[12].au64[0]);  CHECK_FIELD(XState.x87.aXMM[12].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[13].au64[0]);  CHECK_FIELD(XState.x87.aXMM[13].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[14].au64[0]);  CHECK_FIELD(XState.x87.aXMM[14].au64[1]);
-        CHECK_FIELD(XState.x87.aXMM[15].au64[0]);  CHECK_FIELD(XState.x87.aXMM[15].au64[1]);
-        for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->XState.x87.au32RsrvdRest); i++)
-            CHECK_FIELD(XState.x87.au32RsrvdRest[i]);
+        CHECK_XSTATE_FIELD(x87.FCW);
+        CHECK_XSTATE_FIELD(x87.FSW);
+        CHECK_XSTATE_FIELD(x87.FTW);
+        CHECK_XSTATE_FIELD(x87.FOP);
+        CHECK_XSTATE_FIELD(x87.FPUIP);
+        CHECK_XSTATE_FIELD(x87.CS);
+        CHECK_XSTATE_FIELD(x87.Rsrvd1);
+        CHECK_XSTATE_FIELD(x87.FPUDP);
+        CHECK_XSTATE_FIELD(x87.DS);
+        CHECK_XSTATE_FIELD(x87.Rsrvd2);
+        CHECK_XSTATE_FIELD(x87.MXCSR);
+        CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
+        CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]);  CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]);  CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]);  CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]);  CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]);  CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]);  CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]);  CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]);  CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
+        CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]);  CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
+        for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
+            CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
     }
CHECK_FIELD(rip); … … 10518 10562 } 10519 10563 10564 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 10520 10565 Log2(("****\n" 10521 10566 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n" … … 10529 10574 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, 10530 10575 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u, 10531 p Ctx->XState.x87.FSW, pCtx->XState.x87.FCW, pCtx->XState.x87.FTW, pCtx->XState.x87.MXCSR, pCtx->XState.x87.MXCSR_MASK,10576 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK, 10532 10577 szInstr)); 10533 10578 -
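The CHECK_XSTATE_FIELD macro introduced above leans on a small C idiom: sizeof is a compile-time constant, so a switch over it lets one macro pick the right printf-style format for fields of any width. A minimal standalone sketch of just that idiom (the SKETCH* names, the struct and the rem/iem labels are invented for illustration; the real macro reports through RTAssertMsg2Weak and bumps cDiffs):

    #include <stdio.h>
    #include <stdint.h>

    typedef struct SKETCHSTATE { uint16_t FCW; uint64_t FPUIP; } SKETCHSTATE;

    /* Dispatch on sizeof() to select the matching hex width. */
    #define SKETCH_CHECK_FIELD(pOrg, pDbg, a_Field) \
        do { \
            if ((pOrg)->a_Field != (pDbg)->a_Field) \
                switch (sizeof((pOrg)->a_Field)) \
                { \
                    case 2: printf(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, \
                                   (unsigned)(pDbg)->a_Field, (unsigned)(pOrg)->a_Field); break; \
                    case 8: printf(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, \
                                   (unsigned long long)(pDbg)->a_Field, (unsigned long long)(pOrg)->a_Field); break; \
                    default: printf(" %8s differs\n", #a_Field); break; \
                } \
        } while (0)

    int main(void)
    {
        SKETCHSTATE Org = { 0x037f, 0 };
        SKETCHSTATE Dbg = { 0x027f, 0 };
        SKETCH_CHECK_FIELD(&Org, &Dbg, FCW);   /* sizeof == 2 -> %04x format */
        SKETCH_CHECK_FIELD(&Org, &Dbg, FPUIP); /* equal -> prints nothing */
        return 0;
    }

The casts keep every case label compiling for every field type; only the branch matching the field's actual size ever executes.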
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r54898 r55048 6043 6043 */ 6044 6044 6045 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState); 6045 6046 if (iemFRegIsFxSaveFormat(pIemCpu)) 6046 6047 { 6047 p Ctx->XState.x87.FCW = 0x37f;6048 p Ctx->XState.x87.FSW = 0;6049 p Ctx->XState.x87.FTW = 0x00; /* 0 - empty. */6050 p Ctx->XState.x87.FPUDP = 0;6051 p Ctx->XState.x87.DS = 0; //??6052 p Ctx->XState.x87.Rsrvd2= 0;6053 p Ctx->XState.x87.FPUIP = 0;6054 p Ctx->XState.x87.CS = 0; //??6055 p Ctx->XState.x87.Rsrvd1= 0;6056 p Ctx->XState.x87.FOP = 0;6048 pXState->x87.FCW = 0x37f; 6049 pXState->x87.FSW = 0; 6050 pXState->x87.FTW = 0x00; /* 0 - empty. */ 6051 pXState->x87.FPUDP = 0; 6052 pXState->x87.DS = 0; //?? 6053 pXState->x87.Rsrvd2= 0; 6054 pXState->x87.FPUIP = 0; 6055 pXState->x87.CS = 0; //?? 6056 pXState->x87.Rsrvd1= 0; 6057 pXState->x87.FOP = 0; 6057 6058 } 6058 6059 else 6059 6060 { 6060 PX86FPUSTATE pFpu = (PX86FPUSTATE)&p Ctx->XState.x87;6061 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pXState->x87; 6061 6062 pFpu->FCW = 0x37f; 6062 6063 pFpu->FSW = 0; … … 6112 6113 if (rcStrict != VINF_SUCCESS) 6113 6114 return rcStrict; 6114 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512; 6115 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512; 6116 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87; 6115 6117 6116 6118 /* … … 6121 6123 6122 6124 /* common for all formats */ 6123 pDst->FCW = p Ctx->XState.x87.FCW;6124 pDst->FSW = p Ctx->XState.x87.FSW;6125 pDst->FTW = p Ctx->XState.x87.FTW & UINT16_C(0xff);6126 pDst->FOP = p Ctx->XState.x87.FOP;6127 pDst->MXCSR = p Ctx->XState.x87.MXCSR;6128 pDst->MXCSR_MASK = p Ctx->XState.x87.MXCSR_MASK;6125 pDst->FCW = pSrc->FCW; 6126 pDst->FSW = pSrc->FSW; 6127 pDst->FTW = pSrc->FTW & UINT16_C(0xff); 6128 pDst->FOP = pSrc->FOP; 6129 pDst->MXCSR = pSrc->MXCSR; 6130 pDst->MXCSR_MASK = pSrc->MXCSR_MASK; 6129 6131 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++) 6130 6132 { 6131 6133 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing 6132 6134 * them for now... */ 6133 pDst->aRegs[i].au32[0] = p Ctx->XState.x87.aRegs[i].au32[0];6134 pDst->aRegs[i].au32[1] = p Ctx->XState.x87.aRegs[i].au32[1];6135 pDst->aRegs[i].au32[2] = p Ctx->XState.x87.aRegs[i].au32[2] & UINT32_C(0xffff);6135 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0]; 6136 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1]; 6137 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff); 6136 6138 pDst->aRegs[i].au32[3] = 0; 6137 6139 } 6138 6140 6139 6141 /* FPU IP, CS, DP and DS. */ 6140 pDst->FPUIP = p Ctx->XState.x87.FPUIP;6141 pDst->CS = p Ctx->XState.x87.CS;6142 pDst->FPUDP = p Ctx->XState.x87.FPUDP;6143 pDst->DS = p Ctx->XState.x87.DS;6142 pDst->FPUIP = pSrc->FPUIP; 6143 pDst->CS = pSrc->CS; 6144 pDst->FPUDP = pSrc->FPUDP; 6145 pDst->DS = pSrc->DS; 6144 6146 if (enmEffOpSize == IEMMODE_64BIT) 6145 6147 { 6146 6148 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */ 6147 pDst->Rsrvd1 = p Ctx->XState.x87.Rsrvd1;6148 pDst->Rsrvd2 = p Ctx->XState.x87.Rsrvd2;6149 pDst->Rsrvd1 = pSrc->Rsrvd1; 6150 pDst->Rsrvd2 = pSrc->Rsrvd2; 6149 6151 pDst->au32RsrvdForSoftware[0] = 0; 6150 6152 } … … 6163 6165 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8; 6164 6166 for (uint32_t i = 0; i < cXmmRegs; i++) 6165 pDst->aXMM[i] = p Ctx->XState.x87.aXMM[i];6167 pDst->aXMM[i] = pSrc->aXMM[i]; 6166 6168 /** @todo Testcase: What happens to the reserved XMM registers? Untouched, 6167 6169 * right? 
*/ … … 6217 6219 return rcStrict; 6218 6220 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512; 6221 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87; 6219 6222 6220 6223 /* … … 6222 6225 */ 6223 6226 uint32_t const fMXCSR = pSrc->MXCSR; 6224 uint32_t const fMXCSR_MASK = p Ctx->XState.x87.MXCSR_MASK ? pCtx->XState.x87.MXCSR_MASK : UINT32_C(0xffbf);6227 uint32_t const fMXCSR_MASK = pDst->MXCSR_MASK ? pDst->MXCSR_MASK : UINT32_C(0xffbf); 6225 6228 if (fMXCSR & ~fMXCSR_MASK) 6226 6229 { … … 6236 6239 6237 6240 /* common for all formats */ 6238 p Ctx->XState.x87.FCW = pSrc->FCW;6239 p Ctx->XState.x87.FSW = pSrc->FSW;6240 p Ctx->XState.x87.FTW = pSrc->FTW & UINT16_C(0xff);6241 p Ctx->XState.x87.FOP = pSrc->FOP;6242 p Ctx->XState.x87.MXCSR = fMXCSR;6241 pDst->FCW = pSrc->FCW; 6242 pDst->FSW = pSrc->FSW; 6243 pDst->FTW = pSrc->FTW & UINT16_C(0xff); 6244 pDst->FOP = pSrc->FOP; 6245 pDst->MXCSR = fMXCSR; 6243 6246 /* (MXCSR_MASK is read-only) */ 6244 6247 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++) 6245 6248 { 6246 p Ctx->XState.x87.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];6247 p Ctx->XState.x87.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];6248 p Ctx->XState.x87.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);6249 p Ctx->XState.x87.aRegs[i].au32[3] = 0;6249 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0]; 6250 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1]; 6251 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff); 6252 pDst->aRegs[i].au32[3] = 0; 6250 6253 } 6251 6254 … … 6253 6256 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) 6254 6257 { 6255 p Ctx->XState.x87.FPUIP = pSrc->FPUIP;6256 p Ctx->XState.x87.CS = pSrc->CS;6257 p Ctx->XState.x87.Rsrvd1 = pSrc->Rsrvd1;6258 p Ctx->XState.x87.FPUDP = pSrc->FPUDP;6259 p Ctx->XState.x87.DS = pSrc->DS;6260 p Ctx->XState.x87.Rsrvd2 = pSrc->Rsrvd2;6258 pDst->FPUIP = pSrc->FPUIP; 6259 pDst->CS = pSrc->CS; 6260 pDst->Rsrvd1 = pSrc->Rsrvd1; 6261 pDst->FPUDP = pSrc->FPUDP; 6262 pDst->DS = pSrc->DS; 6263 pDst->Rsrvd2 = pSrc->Rsrvd2; 6261 6264 } 6262 6265 else 6263 6266 { 6264 p Ctx->XState.x87.FPUIP = pSrc->FPUIP;6265 p Ctx->XState.x87.CS = pSrc->CS;6266 p Ctx->XState.x87.Rsrvd1 = 0;6267 p Ctx->XState.x87.FPUDP = pSrc->FPUDP;6268 p Ctx->XState.x87.DS = pSrc->DS;6269 p Ctx->XState.x87.Rsrvd2 = 0;6267 pDst->FPUIP = pSrc->FPUIP; 6268 pDst->CS = pSrc->CS; 6269 pDst->Rsrvd1 = 0; 6270 pDst->FPUDP = pSrc->FPUDP; 6271 pDst->DS = pSrc->DS; 6272 pDst->Rsrvd2 = 0; 6270 6273 } 6271 6274 … … 6277 6280 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8; 6278 6281 for (uint32_t i = 0; i < cXmmRegs; i++) 6279 p Ctx->XState.x87.aXMM[i] = pSrc->aXMM[i];6282 pDst->aXMM[i] = pSrc->aXMM[i]; 6280 6283 } 6281 6284 … … 6301 6304 static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx) 6302 6305 { 6306 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87; 6303 6307 if (enmEffOpSize == IEMMODE_16BIT) 6304 6308 { 6305 uPtr.pu16[0] = p Ctx->XState.x87.FCW;6306 uPtr.pu16[1] = p Ctx->XState.x87.FSW;6307 uPtr.pu16[2] = iemFpuCalcFullFtw(p Ctx);6309 uPtr.pu16[0] = pSrcX87->FCW; 6310 uPtr.pu16[1] = pSrcX87->FSW; 6311 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87); 6308 6312 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) 6309 6313 { … … 6313 6317 * effective address ((CS << 4) + IP) in the offset register and not 6314 6318 * doing any address calculations here. 
*/ 6315 uPtr.pu16[3] = (uint16_t)p Ctx->XState.x87.FPUIP;6316 uPtr.pu16[4] = ((p Ctx->XState.x87.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->XState.x87.FOP;6317 uPtr.pu16[5] = (uint16_t)p Ctx->XState.x87.FPUDP;6318 uPtr.pu16[6] = (p Ctx->XState.x87.FPUDP >> 4) & UINT16_C(0xf000);6319 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP; 6320 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP; 6321 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP; 6322 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000); 6319 6323 } 6320 6324 else 6321 6325 { 6322 uPtr.pu16[3] = p Ctx->XState.x87.FPUIP;6323 uPtr.pu16[4] = p Ctx->XState.x87.CS;6324 uPtr.pu16[5] = p Ctx->XState.x87.FPUDP;6325 uPtr.pu16[6] = p Ctx->XState.x87.DS;6326 uPtr.pu16[3] = pSrcX87->FPUIP; 6327 uPtr.pu16[4] = pSrcX87->CS; 6328 uPtr.pu16[5] = pSrcX87->FPUDP; 6329 uPtr.pu16[6] = pSrcX87->DS; 6326 6330 } 6327 6331 } … … 6329 6333 { 6330 6334 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */ 6331 uPtr.pu16[0*2] = p Ctx->XState.x87.FCW;6332 uPtr.pu16[1*2] = p Ctx->XState.x87.FSW;6333 uPtr.pu16[2*2] = iemFpuCalcFullFtw(p Ctx);6335 uPtr.pu16[0*2] = pSrcX87->FCW; 6336 uPtr.pu16[1*2] = pSrcX87->FSW; 6337 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87); 6334 6338 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) 6335 6339 { 6336 uPtr.pu16[3*2] = (uint16_t)p Ctx->XState.x87.FPUIP;6337 uPtr.pu32[4] = ((p Ctx->XState.x87.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->XState.x87.FOP;6338 uPtr.pu16[5*2] = (uint16_t)p Ctx->XState.x87.FPUDP;6339 uPtr.pu32[6] = (p Ctx->XState.x87.FPUDP & UINT32_C(0xffff0000)) >> 4;6340 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP; 6341 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP; 6342 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP; 6343 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4; 6340 6344 } 6341 6345 else 6342 6346 { 6343 uPtr.pu32[3] = p Ctx->XState.x87.FPUIP;6344 uPtr.pu16[4*2] = p Ctx->XState.x87.CS;6345 uPtr.pu16[4*2+1]= p Ctx->XState.x87.FOP;6346 uPtr.pu32[5] = p Ctx->XState.x87.FPUDP;6347 uPtr.pu16[6*2] = p Ctx->XState.x87.DS;6347 uPtr.pu32[3] = pSrcX87->FPUIP; 6348 uPtr.pu16[4*2] = pSrcX87->CS; 6349 uPtr.pu16[4*2+1]= pSrcX87->FOP; 6350 uPtr.pu32[5] = pSrcX87->FPUDP; 6351 uPtr.pu16[6*2] = pSrcX87->DS; 6348 6352 } 6349 6353 } … … 6359 6363 static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx) 6360 6364 { 6365 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87; 6361 6366 if (enmEffOpSize == IEMMODE_16BIT) 6362 6367 { 6363 p Ctx->XState.x87.FCW = uPtr.pu16[0];6364 p Ctx->XState.x87.FSW = uPtr.pu16[1];6365 p Ctx->XState.x87.FTW = uPtr.pu16[2];6368 pDstX87->FCW = uPtr.pu16[0]; 6369 pDstX87->FSW = uPtr.pu16[1]; 6370 pDstX87->FTW = uPtr.pu16[2]; 6366 6371 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) 6367 6372 { 6368 p Ctx->XState.x87.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);6369 p Ctx->XState.x87.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);6370 p Ctx->XState.x87.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);6371 p Ctx->XState.x87.CS = 0;6372 p Ctx->XState.x87.Rsrvd1= 0;6373 p Ctx->XState.x87.DS = 0;6374 p Ctx->XState.x87.Rsrvd2= 0;6373 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4); 6374 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4); 6375 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff); 6376 pDstX87->CS = 0; 6377 pDstX87->Rsrvd1= 0; 6378 pDstX87->DS = 0; 6379 pDstX87->Rsrvd2= 0; 6375 6380 } 
6376 6381 else 6377 6382 { 6378 p Ctx->XState.x87.FPUIP = uPtr.pu16[3];6379 p Ctx->XState.x87.CS = uPtr.pu16[4];6380 p Ctx->XState.x87.Rsrvd1= 0;6381 p Ctx->XState.x87.FPUDP = uPtr.pu16[5];6382 p Ctx->XState.x87.DS = uPtr.pu16[6];6383 p Ctx->XState.x87.Rsrvd2= 0;6383 pDstX87->FPUIP = uPtr.pu16[3]; 6384 pDstX87->CS = uPtr.pu16[4]; 6385 pDstX87->Rsrvd1= 0; 6386 pDstX87->FPUDP = uPtr.pu16[5]; 6387 pDstX87->DS = uPtr.pu16[6]; 6388 pDstX87->Rsrvd2= 0; 6384 6389 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */ 6385 6390 } … … 6387 6392 else 6388 6393 { 6389 p Ctx->XState.x87.FCW = uPtr.pu16[0*2];6390 p Ctx->XState.x87.FSW = uPtr.pu16[1*2];6391 p Ctx->XState.x87.FTW = uPtr.pu16[2*2];6394 pDstX87->FCW = uPtr.pu16[0*2]; 6395 pDstX87->FSW = uPtr.pu16[1*2]; 6396 pDstX87->FTW = uPtr.pu16[2*2]; 6392 6397 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) 6393 6398 { 6394 p Ctx->XState.x87.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);6395 p Ctx->XState.x87.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);6396 p Ctx->XState.x87.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);6397 p Ctx->XState.x87.CS = 0;6398 p Ctx->XState.x87.Rsrvd1= 0;6399 p Ctx->XState.x87.DS = 0;6400 p Ctx->XState.x87.Rsrvd2= 0;6399 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4); 6400 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff); 6401 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4); 6402 pDstX87->CS = 0; 6403 pDstX87->Rsrvd1= 0; 6404 pDstX87->DS = 0; 6405 pDstX87->Rsrvd2= 0; 6401 6406 } 6402 6407 else 6403 6408 { 6404 p Ctx->XState.x87.FPUIP = uPtr.pu32[3];6405 p Ctx->XState.x87.CS = uPtr.pu16[4*2];6406 p Ctx->XState.x87.Rsrvd1= 0;6407 p Ctx->XState.x87.FOP = uPtr.pu16[4*2+1];6408 p Ctx->XState.x87.FPUDP = uPtr.pu32[5];6409 p Ctx->XState.x87.DS = uPtr.pu16[6*2];6410 p Ctx->XState.x87.Rsrvd2= 0;6409 pDstX87->FPUIP = uPtr.pu32[3]; 6410 pDstX87->CS = uPtr.pu16[4*2]; 6411 pDstX87->Rsrvd1= 0; 6412 pDstX87->FOP = uPtr.pu16[4*2+1]; 6413 pDstX87->FPUDP = uPtr.pu32[5]; 6414 pDstX87->DS = uPtr.pu16[6*2]; 6415 pDstX87->Rsrvd2= 0; 6411 6416 } 6412 6417 } 6413 6418 6414 6419 /* Make adjustments. */ 6415 p Ctx->XState.x87.FTW = iemFpuCompressFtw(pCtx->XState.x87.FTW);6416 p Ctx->XState.x87.FCW &= ~X86_FCW_ZERO_MASK;6417 iemFpuRecalcExceptionStatus(p Ctx);6420 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW); 6421 pDstX87->FCW &= ~X86_FCW_ZERO_MASK; 6422 iemFpuRecalcExceptionStatus(pDstX87); 6418 6423 /** @todo Testcase: Check if ES and/or B are automatically cleared if no 6419 6424 * exceptions are pending after loading the saved state? */ … … 6464 6469 return rcStrict; 6465 6470 6471 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6466 6472 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx); 6467 6473 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 
14 : 28)); 6468 for (uint32_t i = 0; i < RT_ELEMENTS(p Ctx->XState.x87.aRegs); i++)6469 { 6470 paRegs[i].au32[0] = p Ctx->XState.x87.aRegs[i].au32[0];6471 paRegs[i].au32[1] = p Ctx->XState.x87.aRegs[i].au32[1];6472 paRegs[i].au16[4] = p Ctx->XState.x87.aRegs[i].au16[4];6474 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++) 6475 { 6476 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0]; 6477 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1]; 6478 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4]; 6473 6479 } … … 6478 6484 6479 6485 /* 6480 * Re-initialize the XState.x87. 6481 */ 6482 p Ctx->XState.x87.FCW = 0x37f;6483 p Ctx->XState.x87.FSW = 0;6484 p Ctx->XState.x87.FTW = 0x00; /* 0 - empty */6485 p Ctx->XState.x87.FPUDP = 0;6486 p Ctx->XState.x87.DS = 0;6487 p Ctx->XState.x87.Rsrvd2= 0;6488 p Ctx->XState.x87.FPUIP = 0;6489 p Ctx->XState.x87.CS = 0;6490 p Ctx->XState.x87.Rsrvd1= 0;6491 p Ctx->XState.x87.FOP = 0;6486 * Re-initialize the FPU context. 6487 */ 6488 pFpuCtx->FCW = 0x37f; 6489 pFpuCtx->FSW = 0; 6490 pFpuCtx->FTW = 0x00; /* 0 - empty */ 6491 pFpuCtx->FPUDP = 0; 6492 pFpuCtx->DS = 0; 6493 pFpuCtx->Rsrvd2= 0; 6494 pFpuCtx->FPUIP = 0; 6495 pFpuCtx->CS = 0; 6496 pFpuCtx->Rsrvd1= 0; 6497 pFpuCtx->FOP = 0; 6492 6498 6493 6499 iemHlpUsedFpu(pIemCpu); … … 6541 6547 return rcStrict; 6542 6548 6549 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6543 6550 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx); 6544 6551 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28)); 6545 for (uint32_t i = 0; i < RT_ELEMENTS(p Ctx->XState.x87.aRegs); i++)6546 { 6547 p Ctx->XState.x87.aRegs[i].au32[0] = paRegs[i].au32[0];6548 p Ctx->XState.x87.aRegs[i].au32[1] = paRegs[i].au32[1];6549 p Ctx->XState.x87.aRegs[i].au32[2] = paRegs[i].au16[4];6550 p Ctx->XState.x87.aRegs[i].au32[3] = 0;6552 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++) 6553 { 6554 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0]; 6555 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1]; 6556 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4]; 6557 pFpuCtx->aRegs[i].au32[3] = 0; 6551 6558 } 6552 6559 … … 6575 6582 /** @todo Testcase: Test that it raises and lowers the FPU exception bits 6576 6583 * according to FSW. (This is what is currently implemented.) */ 6577 p Ctx->XState.x87.FCW = u16Fcw & ~X86_FCW_ZERO_MASK; 6578 iemFpuRecalcExceptionStatus(pCtx); 6584 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6585 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK; 6586 iemFpuRecalcExceptionStatus(pFpuCtx); 6579 6587 6580 6588 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */ … … 6595 6603 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 6596 6604 6597 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->XState.x87.FSW); 6605 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6606 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW); 6598 6607 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK; 6599 Assert(!(RT_BIT(iReg1) & p Ctx->XState.x87.FTW) || !(RT_BIT(iReg2) & p Ctx->XState.x87.FTW));6608 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW)); 6600 6609 6601 6610 /** @todo Testcase: fxch underflow. Making assumptions that underflowed 6602 6611 * registers are read as QNaN and then exchanged. This could be 6603 6612 * wrong... 
*/ 6604 if (p Ctx->XState.x87.FCW & X86_FCW_IM)6605 { 6606 if (RT_BIT(iReg1) & p Ctx->XState.x87.FTW)6607 { 6608 if (RT_BIT(iReg2) & p Ctx->XState.x87.FTW)6609 iemFpuStoreQNan(&p Ctx->XState.x87.aRegs[0].r80);6613 if (pFpuCtx->FCW & X86_FCW_IM) 6614 { 6615 if (RT_BIT(iReg1) & pFpuCtx->FTW) 6616 { 6617 if (RT_BIT(iReg2) & pFpuCtx->FTW) 6618 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80); 6610 6619 else 6611 p Ctx->XState.x87.aRegs[0].r80 = pCtx->XState.x87.aRegs[iStReg].r80;6612 iemFpuStoreQNan(&p Ctx->XState.x87.aRegs[iStReg].r80);6620 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80; 6621 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80); 6613 6622 } 6614 6623 else 6615 6624 { 6616 p Ctx->XState.x87.aRegs[iStReg].r80 = pCtx->XState.x87.aRegs[0].r80;6617 iemFpuStoreQNan(&p Ctx->XState.x87.aRegs[0].r80);6618 } 6619 p Ctx->XState.x87.FSW &= ~X86_FSW_C_MASK;6620 p Ctx->XState.x87.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;6625 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80; 6626 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80); 6627 } 6628 pFpuCtx->FSW &= ~X86_FSW_C_MASK; 6629 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF; 6621 6630 } 6622 6631 else 6623 6632 { 6624 6633 /* raise underflow exception, don't change anything. */ 6625 p Ctx->XState.x87.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);6626 p Ctx->XState.x87.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6627 } 6628 6629 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx );6634 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK); 6635 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B; 6636 } 6637 6638 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx); 6630 6639 iemHlpUsedFpu(pIemCpu); 6631 6640 iemRegAddToRipAndClearRF(pIemCpu, cbInstr); … … 6649 6658 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS)) 6650 6659 return iemRaiseDeviceNotAvailable(pIemCpu); 6651 uint16_t u16Fsw = pCtx->XState.x87.FSW; 6660 6661 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6662 uint16_t u16Fsw = pFpuCtx->FSW; 6652 6663 if (u16Fsw & X86_FSW_ES) 6653 6664 return iemRaiseMathFault(pIemCpu); … … 6658 6669 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw); 6659 6670 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK; 6660 if ((pCtx->XState.x87.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2))) 6661 { 6662 uint32_t u32Eflags = pfnAImpl(&pCtx->XState.x87, &u16Fsw, 6663 &pCtx->XState.x87.aRegs[0].r80, &pCtx->XState.x87.aRegs[iStReg].r80); 6671 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2))) 6672 { 6673 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80); 6664 6674 NOREF(u32Eflags); 6665 6675 6666 p Ctx->XState.x87.FSW &= ~X86_FSW_C1;6667 p Ctx->XState.x87.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;6676 pFpuCtx->FSW &= ~X86_FSW_C1; 6677 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK; 6668 6678 if ( !(u16Fsw & X86_FSW_IE) 6669 || (p Ctx->XState.x87.FCW & X86_FCW_IM) )6679 || (pFpuCtx->FCW & X86_FCW_IM) ) 6670 6680 { 6671 6681 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF); … … 6673 6683 } 6674 6684 } 6675 else if (p Ctx->XState.x87.FCW & X86_FCW_IM)6685 else if (pFpuCtx->FCW & X86_FCW_IM) 6676 6686 { 6677 6687 /* Masked underflow. 
*/ 6678 p Ctx->XState.x87.FSW &= ~X86_FSW_C1;6679 p Ctx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF;6688 pFpuCtx->FSW &= ~X86_FSW_C1; 6689 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF; 6680 6690 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF); 6681 6691 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF; … … 6684 6694 { 6685 6695 /* Raise underflow - don't touch EFLAGS or TOP. */ 6686 p Ctx->XState.x87.FSW &= ~X86_FSW_C1;6687 p Ctx->XState.x87.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6696 pFpuCtx->FSW &= ~X86_FSW_C1; 6697 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B; 6688 6698 fPop = false; 6689 6699 } … … 6694 6704 if (fPop) 6695 6705 { 6696 p Ctx->XState.x87.FTW &= ~RT_BIT(iReg1);6697 p Ctx->XState.x87.FSW &= X86_FSW_TOP_MASK;6698 p Ctx->XState.x87.FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;6699 } 6700 6701 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx );6706 pFpuCtx->FTW &= ~RT_BIT(iReg1); 6707 pFpuCtx->FSW &= X86_FSW_TOP_MASK; 6708 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT; 6709 } 6710 6711 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx); 6702 6712 iemHlpUsedFpu(pIemCpu); 6703 6713 iemRegAddToRipAndClearRF(pIemCpu, cbInstr); -
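The iemFpuCalcFullFtw()/iemFpuCompressFtw() helpers referenced above now take the FPU context directly. They convert between the 16-bit full x87 tag word (two bits per register, 11b = empty) and the abridged 8-bit FTW that the FXSAVE image stores (one bit per register, 1 = not empty). The compressing direction needs only the tags; a minimal sketch, assuming the standard Intel tag encoding (the sketch name is invented; expanding back also needs the register contents to tell valid, zero and special apart, which is presumably why the real helpers take the whole X86FXSTATE):

    #include <stdint.h>

    /* 3 (11b) marks an empty register in the full tag word;
     * anything else means the register is in use. */
    static uint8_t sketchCompressFtw(uint16_t uFullFtw)
    {
        uint8_t uFtw8 = 0;
        for (unsigned iReg = 0; iReg < 8; iReg++)
            if (((uFullFtw >> (iReg * 2)) & 3) != 3)
                uFtw8 |= (uint8_t)(1 << iReg);
        return uFtw8;
    }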
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r54898 r55048 488 488 * We could just do all this in assembly. */ 489 489 uint128_t aGuestXmmRegs[16]; 490 memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest. XState.x87.aXMM[0], sizeof(aGuestXmmRegs));490 memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.CTX_SUFF(pXState)->x87.aXMM[0], sizeof(aGuestXmmRegs)); 491 491 #endif 492 492 … … 511 511 512 512 #ifdef VBOX_WITH_KERNEL_USING_XMM 513 memcpy(&pVCpu->cpum.s.Guest. XState.x87.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));513 memcpy(&pVCpu->cpum.s.Guest.CTX_SUFF(pXState)->x87.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs)); 514 514 #endif 515 515 } -
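The VBOX_WITH_KERNEL_USING_XMM hunk above is a plain park-and-restore: the guest XMM image is copied into a local buffer before a call that may clobber the area and copied back afterwards. Schematically, with invented types and a placeholder worker:

    #include <string.h>
    #include <stdint.h>

    typedef struct SKETCHXMM    { uint64_t au64[2]; } SKETCHXMM;
    typedef struct SKETCHXSTATE { SKETCHXMM aXMM[16]; } SKETCHXSTATE;

    /* Preserve the guest XMM image across a call that may trash it. */
    static void sketchCallPreservingGuestXmm(SKETCHXSTATE *pGuestXState, void (*pfnWorker)(void))
    {
        SKETCHXMM aSaved[16];
        memcpy(aSaved, pGuestXState->aXMM, sizeof(aSaved));  /* stash */
        pfnWorker();                                         /* may clobber */
        memcpy(pGuestXState->aXMM, aSaved, sizeof(aSaved));  /* put back */
    }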
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm
r54898 r55048 1 1 ; $Id$ 2 2 ;; @file 3 ; CPUM - Guest Context Assembly Routines.3 ; CPUM - Ring-0 Assembly Routines (supporting HM and IEM). 4 4 ; 5 5 … … 59 59 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 60 60 BEGINDATA 61 %if 0 ; Currently not used. 62 g_r32_Zero: dd 0.0 63 %endif 64 61 65 ;; 62 66 ; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without … … 70 74 BEGINCODE 71 75 72 ;; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu(). 76 %if 0 ; Currently not used anywhere. 77 ;; 78 ; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu(). 79 ; 73 80 ; Cleans the FPU state, if necessary, before restoring the FPU. 74 81 ; 75 82 ; This macro ASSUMES CR0.TS is not set! 76 ; @remarks Trashes xAX!! 83 ; 84 ; @param xDX Pointer to CPUMCPU. 85 ; @uses xAX, EFLAGS 86 ; 77 87 ; Changes here should also be reflected in CPUMRCA.asm's copy! 88 ; 78 89 %macro CLEANFPU 0 79 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY80 jz .nothing_to_clean81 82 xor eax, eax83 fnstsw ax ; Get FSW84 test eax, RT_BIT(7); If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions85 ; while clearing & loading the FPU bits in 'clean_fpu'86 jz .clean_fpu87 fnclex90 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY 91 jz .nothing_to_clean 92 93 xor eax, eax 94 fnstsw ax ; FSW -> AX. 95 test eax, RT_BIT(7) ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions 96 ; while clearing & loading the FPU bits in 'clean_fpu' below. 97 jz .clean_fpu 98 fnclex 88 99 89 100 .clean_fpu: 90 ffree st7 ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs 91 ; for the upcoming push (load) 92 fild dword [xDX + CPUMCPU.Guest.XState] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU. 93 101 ffree st7 ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs. 102 ; for the upcoming push (load) 103 fild dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU. 94 104 .nothing_to_clean: 95 105 %endmacro 106 %endif ; Unused. 96 107 97 108 … … 99 110 ; save the 32-bit FPU state or 64-bit FPU state. 100 111 ; 101 ; @ remarks Requires CPUMCPU pointer in RDX102 %macro SAVE_32_OR_64_FPU 0 103 o64 fxsave [rdx + CPUMCPU.Guest.XState] 104 105 ; Shouldn't be necessary to check if the entire 64-bit FIP is 0 (i.e. guest hasn't used its FPU yet) because it should 106 ; be taken care of by the calling code, i.e. hmR0[Vmx|Svm]LoadSharedCR0() and hmR0[Vmx|Svm]ExitXcptNm() which ensure107 ; we swap the guest FPU state when it starts using it (#NM). In any case it's only a performance optimization. 108 ; cmp qword [rdx + CPUMCPU.Guest.XState + IP_OFF_IN_X86FXSTATE], 0109 ; je short %%save_done110 111 cmp dword [rdx + CPUMCPU.Guest.XState + CS_OFF_IN_X86FXSTATE], 0 112 jne short %%save_done113 sub rsp, 20h ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0)114 fnstenv [rsp]115 movzx eax, word [rsp + 10h]116 mov [rdx + CPUMCPU.Guest.XState + CS_OFF_IN_X86FXSTATE], eax117 movzx eax, word [rsp + 18h]118 mov [rdx + CPUMCPU.Guest.XState+ DS_OFF_IN_X86FXSTATE], eax119 add rsp, 20h120 mov dword [rdx + CPUMCPU.Guest.XState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC 112 ; @param %1 Pointer to CPUMCPU. 113 ; @param %2 Pointer to XState. 114 ; @uses xAX, xDX, EFLAGS, 20h of stack. 
115 ; 116 %macro SAVE_32_OR_64_FPU 2 117 o64 fxsave [%2] 118 119 xor edx, edx 120 cmp dword [%2 + CS_OFF_IN_X86FXSTATE], 0 121 jne short %%save_done 122 123 sub rsp, 20h ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0). 124 fnstenv [rsp] 125 movzx eax, word [rsp + 10h] 126 mov [%2 + CS_OFF_IN_X86FXSTATE], eax 127 movzx eax, word [rsp + 18h] 128 add rsp, 20h 129 mov [%2 + DS_OFF_IN_X86FXSTATE], eax 130 mov edx, X86_FXSTATE_RSVD_32BIT_MAGIC 131 121 132 %%save_done: 133 mov dword [%2 + X86_OFF_FXSTATE_RSVD], edx 122 134 %endmacro 123 135 124 ;; Macro for FXRSTOR for the guest FPU but loads the one based on what 125 ; was saved before using SAVE_32_OR_64_FPU(). 126 ; 127 ; @remarks Requires CPUMCPU pointer in RDX 128 %macro RESTORE_32_OR_64_FPU 0 129 cmp dword [rdx + CPUMCPU.Guest.XState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC 130 jne short %%restore_64bit_fpu 131 fxrstor [rdx + CPUMCPU.Guest.XState] 132 jmp short %%restore_fpu_done 136 ;; 137 ; Wrapper for selecting 32-bit or 64-bit FXRSTOR according to what SAVE_32_OR_64_FPU did. 138 ; 139 ; @param %1 Pointer to CPUMCPU. 140 ; @param %2 Pointer to XState. 141 ; @uses xAX, xDX, EFLAGS 142 ; 143 %macro RESTORE_32_OR_64_FPU 2 144 cmp dword [%2 + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC 145 jne short %%restore_64bit_fpu 146 fxrstor [%2] 147 jmp short %%restore_fpu_done 133 148 %%restore_64bit_fpu: 134 o64 fxrstor [rdx + CPUMCPU.Guest.XState]149 o64 fxrstor [%2] 135 150 %%restore_fpu_done: 136 151 %endmacro 137 152 138 153 139 ;; Macro to save and modify CR0 (if necessary) before touching the FPU state 140 ; so as to not cause any FPU exceptions. 141 ; 142 ; @remarks Uses xCX for backing-up CR0 (if CR0 needs to be modified) otherwise clears xCX. 143 ; @remarks Trashes xAX. 144 %macro SAVE_CR0_CLEAR_FPU_TRAPS 0 145 xor ecx, ecx 146 mov xAX, cr0 147 test eax, X86_CR0_TS | X86_CR0_EM ; Make sure its safe to access the FPU state. 148 jz %%skip_cr0_write 149 mov xCX, xAX ; Save old CR0 150 and xAX, ~(X86_CR0_TS | X86_CR0_EM) 151 mov cr0, xAX 154 ;; 155 ; Clears CR0.TS and CR0.EM if necessary, saving the previous result. 156 ; 157 ; This is used to avoid FPU exceptions when touching the FPU state. 158 ; 159 ; @param %1 Register to save the old CR0 in (pass to RESTORE_CR0). 160 ; @param %2 Temporary scratch register. 161 ; @uses EFLAGS, CR0 162 ; 163 %macro SAVE_CR0_CLEAR_FPU_TRAPS 2 164 xor %1, %1 165 mov %2, cr0 166 test %2, X86_CR0_TS | X86_CR0_EM ; Make sure its safe to access the FPU state. 167 jz %%skip_cr0_write 168 mov %1, %2 ; Save old CR0 169 and %2, ~(X86_CR0_TS | X86_CR0_EM) 170 mov cr0, %2 152 171 %%skip_cr0_write: 153 172 %endmacro 154 173 155 ;; Macro to restore CR0 from xCX if necessary. 156 ; 157 ; @remarks xCX should contain the CR0 value to restore or 0 if no restoration is needed. 158 %macro RESTORE_CR0 0 159 cmp ecx, 0 160 je %%skip_cr0_restore 161 mov cr0, xCX 174 ;; 175 ; Restore CR0.TS and CR0.EM state if SAVE_CR0_CLEAR_FPU_TRAPS change it. 176 ; 177 ; @param %1 The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in. 178 ; 179 %macro RESTORE_CR0 1 180 cmp %1, 0 181 je %%skip_cr0_restore 182 mov cr0, %1 162 183 %%skip_cr0_restore: 163 184 %endmacro … … 165 186 166 187 ;; 167 ; Saves the host FPU/ XMM state and restores the gueststate.188 ; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state. 
168 189 ; 169 190 ; @returns 0 170 ; @param pC PUMCPUx86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer191 ; @param pCpumCpu x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer 171 192 ; 172 193 align 16 173 194 BEGINPROC cpumR0SaveHostRestoreGuestFPUState 195 ; 196 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input. 197 ; 174 198 %ifdef RT_ARCH_AMD64 175 199 %ifdef RT_OS_WINDOWS 176 mov xDX, rcx200 mov r11, rcx 177 201 %else 178 mov xDX, rdi202 mov r11, rdi 179 203 %endif 180 %else 181 mov xDX, dword [esp + 4] 182 %endif 183 pushf ; The darwin kernel can get upset or upset things if an 184 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0. 185 186 ; Switch the state. 187 or dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) 188 189 ; Clear CR0 FPU bits to not cause exceptions, uses xCX 190 SAVE_CR0_CLEAR_FPU_TRAPS 191 ; Do NOT use xCX from this point! 192 193 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 194 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 195 jz .legacy_mode 196 db 0xea ; jmp far .sixtyfourbit_mode 197 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 204 %define pCpumCpu r11 205 %define pXState r10 206 %else 207 push ebx 208 push esi 209 mov ebx, dword [esp + 4] 210 %define pCpumCpu ebx 211 %define pXState esi 212 %endif 213 214 pushf ; The darwin kernel can get upset or upset things if an 215 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0. 216 217 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use! 218 219 ; 220 ; Switch state. 221 ; 222 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0] 223 224 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 225 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 226 jz .legacy_mode 227 db 0xea ; jmp far .sixtyfourbit_mode 228 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 198 229 .legacy_mode: 199 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL200 201 %ifdef RT_ARCH_AMD64 202 ; Use explicit REX prefix. See @bugref{6398}.203 o64 fxsave [rdx + CPUMCPU.Host.XState] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption) 204 205 ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.206 test dword [rdx+ CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE207 jnz short .fpu_load_32_or_64208 fxrstor [rdx + CPUMCPU.Guest.XState]209 jmp short .fpu_load_done230 %endif 231 232 %ifdef RT_ARCH_AMD64 233 o64 fxsave [pXState] ; Use explicit REX prefix. See @bugref{6398}. 234 235 ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}. 236 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0] 237 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE 238 jnz short .fpu_load_32_or_64 239 fxrstor [pXState] 240 jmp short .fpu_load_done 210 241 .fpu_load_32_or_64: 211 RESTORE_32_OR_64_FPU242 RESTORE_32_OR_64_FPU pCpumCpu, pXState 212 243 .fpu_load_done: 213 244 %else 214 fxsave [edx + CPUMCPU.Host.XState] ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption) 215 fxrstor [edx + CPUMCPU.Guest.XState] 245 fxsave [pXState] 246 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0] 247 fxrstor [pXState] 216 248 %endif 217 249 218 250 %ifdef VBOX_WITH_KERNEL_USING_XMM 219 ; Restore the non-volatile xmm registers. 
ASSUMING 64-bit windows220 lea r11, [xDX + CPUMCPU.Host.XState + XMM_OFF_IN_X86FXSTATE]221 movdqa xmm6, [r11+ 060h]222 movdqa xmm7, [r11+ 070h]223 movdqa xmm8, [r11+ 080h]224 movdqa xmm9, [r11+ 090h]225 movdqa xmm10, [r11+ 0a0h]226 movdqa xmm11, [r11+ 0b0h]227 movdqa xmm12, [r11+ 0c0h]228 movdqa xmm13, [r11+ 0d0h]229 movdqa xmm14, [r11+ 0e0h]230 movdqa xmm15, [r11+ 0f0h]251 ; Restore the non-volatile xmm registers. ASSUMING 64-bit host. 252 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0] 253 movdqa xmm6, [pXState + XMM_OFF_IN_X86FXSTATE + 060h] 254 movdqa xmm7, [pXState + XMM_OFF_IN_X86FXSTATE + 070h] 255 movdqa xmm8, [pXState + XMM_OFF_IN_X86FXSTATE + 080h] 256 movdqa xmm9, [pXState + XMM_OFF_IN_X86FXSTATE + 090h] 257 movdqa xmm10, [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h] 258 movdqa xmm11, [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h] 259 movdqa xmm12, [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h] 260 movdqa xmm13, [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h] 261 movdqa xmm14, [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h] 262 movdqa xmm15, [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h] 231 263 %endif 232 264 233 265 .done: 234 ; Restore CR0 from xCX if it was previously saved. 235 RESTORE_CR0 236 popf 237 xor eax, eax 238 ret 266 RESTORE_CR0 xCX 267 or dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) 268 popf 269 270 %ifdef RT_ARCH_X86 271 pop esi 272 pop ebx 273 %endif 274 xor eax, eax 275 ret 239 276 240 277 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 … … 242 279 BITS 64 243 280 .sixtyfourbit_mode: 244 and edx, 0ffffffffh245 o64 fxsave [rdx + CPUMCPU.Host.XState] 246 247 ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.248 test dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE249 jnz short .fpu_load_32_or_64_darwin250 fxrstor [rdx + CPUMCPU.Guest.XState]251 jmp short .fpu_load_done_darwin281 o64 fxsave [pXState] 282 283 ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}. 284 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0] 285 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE 286 jnz short .fpu_load_32_or_64_darwin 287 fxrstor [pXState] 288 jmp short .fpu_load_done_darwin 252 289 .fpu_load_32_or_64_darwin: 253 RESTORE_32_OR_64_FPU290 RESTORE_32_OR_64_FPU pCpumCpu, pXState 254 291 .fpu_load_done_darwin: 255 292 256 jmp far [.fpret wrt rip]293 jmp far [.fpret wrt rip] 257 294 .fpret: ; 16:32 Pointer to .the_end. 258 dd .done, NAME(SUPR0AbsKernelCS)295 dd .done, NAME(SUPR0AbsKernelCS) 259 296 BITS 32 260 297 %endif 298 %undef pCpumCpu 299 %undef pXState 261 300 ENDPROC cpumR0SaveHostRestoreGuestFPUState 262 301 263 302 264 303 265 %ifndef VBOX_WITH_HYBRID_32BIT_KERNEL 266 304 ;; 267 ; Saves the host FPU/ XMM state268 ; 269 ; @returns 0270 ; @param pC PUMCPUx86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer305 ; Saves the host FPU/SSE/AVX state. 306 ; 307 ; @returns VINF_SUCCESS (0) in EAX 308 ; @param pCpumCpu x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer 271 309 ; 272 310 align 16 273 311 BEGINPROC cpumR0SaveHostFPUState 274 mov xDX, dword [esp + 4] 275 pushf ; The darwin kernel can get upset or upset things if an 276 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0. 277 278 ; Switch the state. 279 or dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) 280 281 ; Clear CR0 FPU bits to not cause exceptions, uses xCX 282 SAVE_CR0_CLEAR_FPU_TRAPS 283 ; Do NOT use xCX from this point! 
285 286 fxsave [xDX + CPUMCPU.Host.XState] ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption) 287 288 ; Restore CR0 from xCX if it was saved previously. 289 RESTORE_CR0 290 291 popf 292 xor eax, eax 293 ret 312 ; 313 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input. 314 ; 315 %ifdef RT_ARCH_AMD64 316 %ifdef RT_OS_WINDOWS 317 mov r11, rcx 318 %else 319 mov r11, rdi 320 %endif 321 %define pCpumCpu r11 322 %define pXState r10 323 %else 324 push ebx 325 push esi 326 mov ebx, dword [esp + 4] 327 %define pCpumCpu ebx 328 %define pXState esi 329 %endif 330 331 pushf ; The darwin kernel can get upset or upset things if an 332 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0. 333 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use! 334 335 ; 336 ; Save the host state. 337 ; 338 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0] 339 340 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 341 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 342 jz .legacy_mode 343 db 0xea ; jmp far .sixtyfourbit_mode 344 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 345 .legacy_mode: 346 %endif 347 348 %ifdef RT_ARCH_AMD64 349 o64 fxsave [pXState] 350 %else 351 fxsave [pXState] 352 %endif 353 354 .done: 355 RESTORE_CR0 xCX 356 or dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) 357 popf 358 359 %ifdef RT_ARCH_X86 360 pop esi 361 pop ebx 362 %endif 363 xor eax, eax 364 ret 365 366 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 367 ALIGNCODE(16) 368 BITS 64 369 .sixtyfourbit_mode: 370 ; Save the host FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}. 371 o64 fxsave [pXState] 372 jmp far [.fpret wrt rip] 373 .fpret: ; 16:32 Pointer to .the_end. 374 dd .done, NAME(SUPR0AbsKernelCS) 375 BITS 32 376 %endif 377 %undef pCpumCpu 378 %undef pXState 294 379 ENDPROC cpumR0SaveHostFPUState 295 380 %endif 296 381 297 382 298 383 299 384 ;; 300 385 ;; 301 ; Saves the guest FPU/ XMM state and restores the hoststate.302 ; 303 ; @returns 0304 ; @param pC PUMCPUx86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer386 ; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state. 387 ; 388 ; @returns VINF_SUCCESS (0) in eax. 389 ; @param pCpumCpu x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer 305 390 ; 306 391 align 16 307 392 BEGINPROC cpumR0SaveGuestRestoreHostFPUState 393 ; 394 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input. 395 ; 308 396 %ifdef RT_ARCH_AMD64 309 397 %ifdef RT_OS_WINDOWS 310 mov xDX, rcx398 mov r11, rcx 311 399 %else 312 mov xDX, rdi400 mov r11, rdi 313 401 %endif 314 %else 315 mov xDX, dword [esp + 4] 316 %endif 317 318 ; Only restore FPU if guest has used it. 319 ; Using fxrstor should ensure that we're not causing unwanted exception on the host. 320 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU 321 jz .fpu_not_used 322 323 pushf ; The darwin kernel can get upset or upset things if an 324 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0. 325 326 ; Clear CR0 FPU bits to not cause exceptions, uses xCX 327 SAVE_CR0_CLEAR_FPU_TRAPS 328 ; Do NOT use xCX from this point! 329 330 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 331 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 332 jz .legacy_mode 333 db 0xea ; jmp far .sixtyfourbit_mode 334 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 402 %define pCpumCpu r11 403 %define pXState r10 404 %else 405 push ebx 406 push esi 407 mov ebx, dword [esp + 4] 408 %define pCpumCpu ebx 409 %define pXState esi 410 %endif 411 412 ; 413 ; Only restore FPU if guest has used it. 
; 415 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU 416 jz .fpu_not_used 417 418 pushf ; The darwin kernel can get upset or upset things if an 419 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0. 420 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use! 421 422 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0] 423 424 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 425 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 426 jz .legacy_mode 427 db 0xea ; jmp far .sixtyfourbit_mode 428 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 335 429 .legacy_mode: 336 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL337 338 %ifdef RT_ARCH_AMD64 339 ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.340 test dword [rdx+ CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE341 jnz short .fpu_save_32_or_64342 fxsave [rdx + CPUMCPU.Guest.XState]343 jmp short .fpu_save_done430 %endif 431 432 %ifdef RT_ARCH_AMD64 433 ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}. 434 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE 435 jnz short .fpu_save_32_or_64 436 fxsave [pXState] 437 jmp short .fpu_save_done 344 438 .fpu_save_32_or_64: 345 SAVE_32_OR_64_FPU439 SAVE_32_OR_64_FPU pCpumCpu, pXState 346 440 .fpu_save_done: 347 441 348 ; Use explicit REX prefix. See @bugref{6398}. 349 o64 fxrstor [rdx + CPUMCPU.Host.XState] 350 %else 351 fxsave [edx + CPUMCPU.Guest.XState] ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption) 352 fxrstor [edx + CPUMCPU.Host.XState] 442 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0] 443 o64 fxrstor [pXState] ; Use explicit REX prefix. See @bugref{6398}. 444 %else 445 fxsave [pXState] ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption) 446 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0] 447 fxrstor [pXState] 353 448 %endif 354 449 355 450 .done: 356 ; Restore CR0 from xCX if it was previously saved.357 RESTORE_CR0358 and dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU359 popf 451 RESTORE_CR0 xCX 452 and dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU 453 popf 454 360 455 .fpu_not_used: 361 xor eax, eax 362 ret 456 %ifdef RT_ARCH_X86 457 pop esi 458 pop ebx 459 %endif 460 xor eax, eax 461 ret 363 462 364 463 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 … … 366 465 BITS 64 367 466 .sixtyfourbit_mode: 368 and edx, 0ffffffffh 369 370 ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}. 371 test dword [rdx + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE 372 jnz short .fpu_save_32_or_64_darwin 373 fxsave [rdx + CPUMCPU.Guest.XState] 374 jmp short .fpu_save_done_darwin 467 ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}. 468 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE 469 jnz short .fpu_save_32_or_64_darwin 470 fxsave [pXState] 471 jmp short .fpu_save_done_darwin 375 472 .fpu_save_32_or_64_darwin: 376 SAVE_32_OR_64_FPU473 SAVE_32_OR_64_FPU pCpumCpu, pXState 377 474 .fpu_save_done_darwin: 378 475 379 o64 fxrstor [rdx + CPUMCPU.Host.XState] 380 jmp far [.fpret wrt rip] 476 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0] 477 o64 fxrstor [pXState] 478 jmp far [.fpret wrt rip] 381 479 .fpret: ; 16:32 Pointer to .the_end. 
382 dd .done, NAME(SUPR0AbsKernelCS)480 dd .done, NAME(SUPR0AbsKernelCS) 383 481 BITS 32 384 %endif 482 483 %undef pCpumCpu 484 %undef pXState 385 485 ENDPROC cpumR0SaveGuestRestoreHostFPUState 386 486 387 487 388 488 ;; 389 ; Sets the host's FPU/XMM state 489 ; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host. 390 490 ; 391 491 ; @returns 0 392 ; @param pCPUMCPU x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer492 ; @param pCpumCpu x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer 393 493 ; 394 494 align 16 395 495 BEGINPROC cpumR0RestoreHostFPUState 496 ; 497 ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input. 498 ; 396 499 %ifdef RT_ARCH_AMD64 397 500 %ifdef RT_OS_WINDOWS 398 mov xDX, rcx501 mov r11, rcx 399 502 %else 400 mov xDX, rdi503 mov r11, rdi 401 504 %endif 402 %else 403 mov xDX, dword [esp + 4] 404 %endif 405 406 ; Restore FPU if guest has used it. 407 ; Using fxrstor should ensure that we're not causing unwanted exception on the host. 408 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU 409 jz short .fpu_not_used 410 411 pushf ; The darwin kernel can get upset or upset things if an 412 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0. 413 414 ; Clear CR0 FPU bits to not cause exceptions, uses xCX 415 SAVE_CR0_CLEAR_FPU_TRAPS 416 ; Do NOT use xCX from this point! 417 418 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 419 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 420 jz .legacy_mode 421 db 0xea ; jmp far .sixtyfourbit_mode 422 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 505 %define pCpumCpu r11 506 %define pXState r10 507 %else 508 push ebx 509 push esi 510 mov ebx, dword [esp + 4] 511 %define pCpumCpu ebx 512 %define pXState esi 513 %endif 514 515 ; 516 ; Restore FPU if guest has used it. 517 ; 518 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU 519 jz short .fpu_not_used 520 521 pushf ; The darwin kernel can get upset or upset things if an 522 cli ; interrupt occurs while we're doing fxsave/fxrstor/cr0. 523 SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use! 524 525 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0] 526 527 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 528 cmp byte [NAME(g_fCPUMIs64bitHost)], 0 529 jz .legacy_mode 530 db 0xea ; jmp far .sixtyfourbit_mode 531 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) 423 532 .legacy_mode: 424 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL425 426 %ifdef RT_ARCH_AMD64 427 o64 fxrstor [xDX + CPUMCPU.Host.XState]428 %else 429 fxrstor [xDX + CPUMCPU.Host.XState]533 %endif 534 535 %ifdef RT_ARCH_AMD64 536 o64 fxrstor [pXState] 537 %else 538 fxrstor [pXState] 430 539 %endif 431 540 432 541 .done: 433 ; Restore CR0 from xCX if it was previously saved.434 RESTORE_CR0435 and dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU436 popf 542 RESTORE_CR0 xCX 543 and dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU 544 popf 545 437 546 .fpu_not_used: 438 xor eax, eax 439 ret 547 %ifdef RT_ARCH_X86 548 pop esi 549 pop ebx 550 %endif 551 xor eax, eax 552 ret 440 553 441 554 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0 … … 443 556 BITS 64 444 557 .sixtyfourbit_mode: 445 and edx, 0ffffffffh 446 o64 fxrstor [rdx + CPUMCPU.Host.XState] 447 jmp far [.fpret wrt rip] 558 o64 fxrstor [pXState] 559 jmp far [.fpret wrt rip] 448 560 .fpret: ; 16:32 Pointer to .the_end. 449 dd .done, NAME(SUPR0AbsKernelCS)561 dd .done, NAME(SUPR0AbsKernelCS) 450 562 BITS 32 451 %endif 563 564 %undef pCpumCpu 565 %undef pXState 452 566 ENDPROC cpumR0RestoreHostFPUState 453 567 -
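The 32-bit/64-bit decision RESTORE_32_OR_64_FPU makes above hinges entirely on the magic that SAVE_32_OR_64_FPU leaves in the reserved dword of the FXSAVE image. A C sketch of that check alone, as a reading of the macros (the constants mirror X86_OFF_FXSTATE_RSVD and X86_FXSTATE_RSVD_32BIT_MAGIC from iprt/x86.h; the SKETCH names and byte-buffer framing are invented, and the actual fxrstor flavours stay in the assembly):

    #include <string.h>
    #include <stdint.h>

    #define SKETCH_OFF_FXSTATE_RSVD         0x1d0       /* X86_OFF_FXSTATE_RSVD */
    #define SKETCH_FXSTATE_RSVD_32BIT_MAGIC 0x32b3232bU /* X86_FXSTATE_RSVD_32BIT_MAGIC */

    /* Returns 1 when the image was taken by the 32-bit save path
     * (use plain fxrstor), 0 otherwise (use o64 fxrstor). */
    static int sketchIs32BitFxSaveImage(const uint8_t *pabFxSaveImage)
    {
        uint32_t uRsvd;
        memcpy(&uRsvd, pabFxSaveImage + SKETCH_OFF_FXSTATE_RSVD, sizeof(uRsvd));
        return uRsvd == SKETCH_FXSTATE_RSVD_32BIT_MAGIC;
    }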
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r54898 r55048 1971 1971 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp)); 1972 1972 1973 1974 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 1973 1975 Log(("FPU:\n" 1974 1976 "FCW=%04x FSW=%04x FTW=%02x\n" … … 1976 1977 "FPUDP=%04x DS=%04x Rsrvd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n" , 1978 p Ctx->XState.x87.FCW, p Ctx->XState.x87.FSW, p Ctx->XState.x87.FTW,1979 p Ctx->XState.x87.FOP, p Ctx->XState.x87.FPUIP, p Ctx->XState.x87.CS, p Ctx->XState.x87.Rsrvd1,1980 p Ctx->XState.x87.FPUDP, p Ctx->XState.x87.DS, p Ctx->XState.x87.Rsrvd2,1981 p Ctx->XState.x87.MXCSR, p Ctx->XState.x87.MXCSR_MASK)); 1979 pFpuCtx->FCW, pFpuCtx->FSW, pFpuCtx->FTW, 1980 pFpuCtx->FOP, pFpuCtx->FPUIP, pFpuCtx->CS, pFpuCtx->Rsrvd1, 1981 pFpuCtx->FPUDP, pFpuCtx->DS, pFpuCtx->Rsrvd2, 1982 pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK)); 1982 1983 1983 1984 Log(("MSR:\n" -
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r52192 r55048 1186 1186 ; Load the full guest XMM register state. 1187 1187 mov r10, [xBP + 018h] ; pCtx 1188 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]1189 movdqa xmm0, [r10 + 000h]1190 movdqa xmm1, [r10 + 010h]1191 movdqa xmm2, [r10 + 020h]1192 movdqa xmm3, [r10 + 030h]1193 movdqa xmm4, [r10 + 040h]1194 movdqa xmm5, [r10 + 050h]1195 movdqa xmm6, [r10 + 060h]1196 movdqa xmm7, [r10 + 070h]1197 movdqa xmm8, [r10 + 080h]1198 movdqa xmm9, [r10 + 090h]1199 movdqa xmm10, [r10 + 0a0h]1200 movdqa xmm11, [r10 + 0b0h]1201 movdqa xmm12, [r10 + 0c0h]1202 movdqa xmm13, [r10 + 0d0h]1203 movdqa xmm14, [r10 + 0e0h]1204 movdqa xmm15, [r10 + 0f0h]1188 mov r10, [r10 + CPUMCTX.pXStateR0] 1189 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h] 1190 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h] 1191 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h] 1192 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h] 1193 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h] 1194 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h] 1195 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h] 1196 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h] 1197 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h] 1198 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h] 1199 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h] 1200 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h] 1201 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h] 1202 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h] 1203 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h] 1204 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h] 1205 1205 1206 1206 ; Make the call (same as in the other case ). … … 1216 1216 ; Save the guest XMM registers. 1217 1217 mov r10, [xBP + 018h] ; pCtx 1218 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]1219 movdqa [r10 + 000h], xmm01220 movdqa [r10 + 010h], xmm11221 movdqa [r10 + 020h], xmm21222 movdqa [r10 + 030h], xmm31223 movdqa [r10 + 040h], xmm41224 movdqa [r10 + 050h], xmm51225 movdqa [r10 + 060h], xmm61226 movdqa [r10 + 070h], xmm71227 movdqa [r10 + 080h], xmm81228 movdqa [r10 + 090h], xmm91229 movdqa [r10 + 0a0h], xmm101230 movdqa [r10 + 0b0h], xmm111231 movdqa [r10 + 0c0h], xmm121232 movdqa [r10 + 0d0h], xmm131233 movdqa [r10 + 0e0h], xmm141234 movdqa [r10 + 0f0h], xmm151218 mov r10, [r10 + CPUMCTX.pXStateR0] 1219 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0 1220 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1 1221 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2 1222 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3 1223 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4 1224 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5 1225 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6 1226 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7 1227 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8 1228 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9 1229 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10 1230 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11 1231 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12 1232 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13 1233 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14 1234 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15 1235 1235 1236 1236 ; Load the host XMM registers. … … 1314 1314 ; Load the full guest XMM register state. 
1315 1315 mov r10, [xBP + 020h] ; pCtx 1316 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]1317 movdqa xmm0, [r10 + 000h]1318 movdqa xmm1, [r10 + 010h]1319 movdqa xmm2, [r10 + 020h]1320 movdqa xmm3, [r10 + 030h]1321 movdqa xmm4, [r10 + 040h]1322 movdqa xmm5, [r10 + 050h]1323 movdqa xmm6, [r10 + 060h]1324 movdqa xmm7, [r10 + 070h]1325 movdqa xmm8, [r10 + 080h]1326 movdqa xmm9, [r10 + 090h]1327 movdqa xmm10, [r10 + 0a0h]1328 movdqa xmm11, [r10 + 0b0h]1329 movdqa xmm12, [r10 + 0c0h]1330 movdqa xmm13, [r10 + 0d0h]1331 movdqa xmm14, [r10 + 0e0h]1332 movdqa xmm15, [r10 + 0f0h]1316 mov r10, [r10 + CPUMCTX.pXStateR0] 1317 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h] 1318 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h] 1319 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h] 1320 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h] 1321 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h] 1322 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h] 1323 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h] 1324 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h] 1325 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h] 1326 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h] 1327 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h] 1328 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h] 1329 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h] 1330 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h] 1331 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h] 1332 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h] 1333 1333 1334 1334 ; Make the call (same as in the other case ). … … 1344 1344 ; Save the guest XMM registers. 1345 1345 mov r10, [xBP + 020h] ; pCtx 1346 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]1347 movdqa [r10 + 000h], xmm01348 movdqa [r10 + 010h], xmm11349 movdqa [r10 + 020h], xmm21350 movdqa [r10 + 030h], xmm31351 movdqa [r10 + 040h], xmm41352 movdqa [r10 + 050h], xmm51353 movdqa [r10 + 060h], xmm61354 movdqa [r10 + 070h], xmm71355 movdqa [r10 + 080h], xmm81356 movdqa [r10 + 090h], xmm91357 movdqa [r10 + 0a0h], xmm101358 movdqa [r10 + 0b0h], xmm111359 movdqa [r10 + 0c0h], xmm121360 movdqa [r10 + 0d0h], xmm131361 movdqa [r10 + 0e0h], xmm141362 movdqa [r10 + 0f0h], xmm151346 mov r10, [r10 + CPUMCTX.pXStateR0] 1347 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0 1348 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1 1349 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2 1350 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3 1351 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4 1352 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5 1353 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6 1354 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7 1355 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8 1356 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9 1357 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10 1358 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11 1359 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12 1360 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13 1361 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14 1362 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15 1363 1363 1364 1364 ; Load the host XMM registers. -
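The movdqa ladders above address the XMM slots as fixed 16-byte strides from XMM_OFF_IN_X86FXSTATE inside the save area, which is now reached through CPUMCTX.pXStateR0 rather than by offsetting into the context itself. The byte-level equivalent of the store half, as a sketch (0xa0 is where aXMM starts in the standard FXSAVE image layout; the SKETCH names are invented):

    #include <string.h>
    #include <stdint.h>

    #define SKETCH_XMM_OFF_IN_X86FXSTATE 0xa0 /* aXMM[0] in the FXSAVE image */

    /* Copy sixteen 16-byte XMM values into the save area, one slot per register. */
    static void sketchStoreXmmBlock(uint8_t *pbXStateImage, const uint8_t (*paHwXmm)[16])
    {
        for (unsigned iXmm = 0; iXmm < 16; iXmm++)
            memcpy(pbXStateImage + SKETCH_XMM_OFF_IN_X86FXSTATE + iXmm * 16,
                   paHwXmm[iXmm], 16);
    }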
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r54930 r55048 112 112 *******************************************************************************/ 113 113 /** Saved state field descriptors for CPUMCTX. */ 114 static const SSMFIELD g_aCpumX87Fields[] = 115 { 116 SSMFIELD_ENTRY( X86FXSTATE, FCW), 117 SSMFIELD_ENTRY( X86FXSTATE, FSW), 118 SSMFIELD_ENTRY( X86FXSTATE, FTW), 119 SSMFIELD_ENTRY( X86FXSTATE, FOP), 120 SSMFIELD_ENTRY( X86FXSTATE, FPUIP), 121 SSMFIELD_ENTRY( X86FXSTATE, CS), 122 SSMFIELD_ENTRY( X86FXSTATE, Rsrvd1), 123 SSMFIELD_ENTRY( X86FXSTATE, FPUDP), 124 SSMFIELD_ENTRY( X86FXSTATE, DS), 125 SSMFIELD_ENTRY( X86FXSTATE, Rsrvd2), 126 SSMFIELD_ENTRY( X86FXSTATE, MXCSR), 127 SSMFIELD_ENTRY( X86FXSTATE, MXCSR_MASK), 128 SSMFIELD_ENTRY( X86FXSTATE, aRegs[0]), 129 SSMFIELD_ENTRY( X86FXSTATE, aRegs[1]), 130 SSMFIELD_ENTRY( X86FXSTATE, aRegs[2]), 131 SSMFIELD_ENTRY( X86FXSTATE, aRegs[3]), 132 SSMFIELD_ENTRY( X86FXSTATE, aRegs[4]), 133 SSMFIELD_ENTRY( X86FXSTATE, aRegs[5]), 134 SSMFIELD_ENTRY( X86FXSTATE, aRegs[6]), 135 SSMFIELD_ENTRY( X86FXSTATE, aRegs[7]), 136 SSMFIELD_ENTRY( X86FXSTATE, aXMM[0]), 137 SSMFIELD_ENTRY( X86FXSTATE, aXMM[1]), 138 SSMFIELD_ENTRY( X86FXSTATE, aXMM[2]), 139 SSMFIELD_ENTRY( X86FXSTATE, aXMM[3]), 140 SSMFIELD_ENTRY( X86FXSTATE, aXMM[4]), 141 SSMFIELD_ENTRY( X86FXSTATE, aXMM[5]), 142 SSMFIELD_ENTRY( X86FXSTATE, aXMM[6]), 143 SSMFIELD_ENTRY( X86FXSTATE, aXMM[7]), 144 SSMFIELD_ENTRY( X86FXSTATE, aXMM[8]), 145 SSMFIELD_ENTRY( X86FXSTATE, aXMM[9]), 146 SSMFIELD_ENTRY( X86FXSTATE, aXMM[10]), 147 SSMFIELD_ENTRY( X86FXSTATE, aXMM[11]), 148 SSMFIELD_ENTRY( X86FXSTATE, aXMM[12]), 149 SSMFIELD_ENTRY( X86FXSTATE, aXMM[13]), 150 SSMFIELD_ENTRY( X86FXSTATE, aXMM[14]), 151 SSMFIELD_ENTRY( X86FXSTATE, aXMM[15]), 152 SSMFIELD_ENTRY_TERM() 153 }; 154 155 /** Saved state field descriptors for CPUMCTX. 
*/ 114 156 static const SSMFIELD g_aCpumCtxFields[] = 115 157 { 116 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FCW),117 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FSW),118 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FTW),119 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FOP),120 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FPUIP),121 SSMFIELD_ENTRY( CPUMCTX, XState.x87.CS),122 SSMFIELD_ENTRY( CPUMCTX, XState.x87.Rsrvd1),123 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FPUDP),124 SSMFIELD_ENTRY( CPUMCTX, XState.x87.DS),125 SSMFIELD_ENTRY( CPUMCTX, XState.x87.Rsrvd2),126 SSMFIELD_ENTRY( CPUMCTX, XState.x87.MXCSR),127 SSMFIELD_ENTRY( CPUMCTX, XState.x87.MXCSR_MASK),128 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[0]),129 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[1]),130 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[2]),131 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[3]),132 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[4]),133 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[5]),134 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[6]),135 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[7]),136 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[0]),137 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[1]),138 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[2]),139 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[3]),140 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[4]),141 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[5]),142 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[6]),143 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[7]),144 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[8]),145 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[9]),146 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[10]),147 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[11]),148 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[12]),149 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[13]),150 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[14]),151 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[15]),152 158 SSMFIELD_ENTRY( CPUMCTX, rdi), 153 159 SSMFIELD_ENTRY( CPUMCTX, rsi), … … 246 252 /** Saved state field descriptors for CPUMCTX in V4.1 before the hidden selector 247 253 * registeres changed. 
*/ 254 static const SSMFIELD g_aCpumX87FieldsMem[] = 255 { 256 SSMFIELD_ENTRY( X86FXSTATE, FCW), 257 SSMFIELD_ENTRY( X86FXSTATE, FSW), 258 SSMFIELD_ENTRY( X86FXSTATE, FTW), 259 SSMFIELD_ENTRY( X86FXSTATE, FOP), 260 SSMFIELD_ENTRY( X86FXSTATE, FPUIP), 261 SSMFIELD_ENTRY( X86FXSTATE, CS), 262 SSMFIELD_ENTRY( X86FXSTATE, Rsrvd1), 263 SSMFIELD_ENTRY( X86FXSTATE, FPUDP), 264 SSMFIELD_ENTRY( X86FXSTATE, DS), 265 SSMFIELD_ENTRY( X86FXSTATE, Rsrvd2), 266 SSMFIELD_ENTRY( X86FXSTATE, MXCSR), 267 SSMFIELD_ENTRY( X86FXSTATE, MXCSR_MASK), 268 SSMFIELD_ENTRY( X86FXSTATE, aRegs[0]), 269 SSMFIELD_ENTRY( X86FXSTATE, aRegs[1]), 270 SSMFIELD_ENTRY( X86FXSTATE, aRegs[2]), 271 SSMFIELD_ENTRY( X86FXSTATE, aRegs[3]), 272 SSMFIELD_ENTRY( X86FXSTATE, aRegs[4]), 273 SSMFIELD_ENTRY( X86FXSTATE, aRegs[5]), 274 SSMFIELD_ENTRY( X86FXSTATE, aRegs[6]), 275 SSMFIELD_ENTRY( X86FXSTATE, aRegs[7]), 276 SSMFIELD_ENTRY( X86FXSTATE, aXMM[0]), 277 SSMFIELD_ENTRY( X86FXSTATE, aXMM[1]), 278 SSMFIELD_ENTRY( X86FXSTATE, aXMM[2]), 279 SSMFIELD_ENTRY( X86FXSTATE, aXMM[3]), 280 SSMFIELD_ENTRY( X86FXSTATE, aXMM[4]), 281 SSMFIELD_ENTRY( X86FXSTATE, aXMM[5]), 282 SSMFIELD_ENTRY( X86FXSTATE, aXMM[6]), 283 SSMFIELD_ENTRY( X86FXSTATE, aXMM[7]), 284 SSMFIELD_ENTRY( X86FXSTATE, aXMM[8]), 285 SSMFIELD_ENTRY( X86FXSTATE, aXMM[9]), 286 SSMFIELD_ENTRY( X86FXSTATE, aXMM[10]), 287 SSMFIELD_ENTRY( X86FXSTATE, aXMM[11]), 288 SSMFIELD_ENTRY( X86FXSTATE, aXMM[12]), 289 SSMFIELD_ENTRY( X86FXSTATE, aXMM[13]), 290 SSMFIELD_ENTRY( X86FXSTATE, aXMM[14]), 291 SSMFIELD_ENTRY( X86FXSTATE, aXMM[15]), 292 SSMFIELD_ENTRY_IGNORE( X86FXSTATE, au32RsrvdRest), 293 SSMFIELD_ENTRY_IGNORE( X86FXSTATE, au32RsrvdForSoftware), 294 }; 295 296 /** Saved state field descriptors for CPUMCTX in V4.1 before the hidden selector 297 * registeres changed. 
*/ 248 298 static const SSMFIELD g_aCpumCtxFieldsMem[] = 249 299 { 250 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FCW),251 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FSW),252 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FTW),253 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FOP),254 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FPUIP),255 SSMFIELD_ENTRY( CPUMCTX, XState.x87.CS),256 SSMFIELD_ENTRY( CPUMCTX, XState.x87.Rsrvd1),257 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FPUDP),258 SSMFIELD_ENTRY( CPUMCTX, XState.x87.DS),259 SSMFIELD_ENTRY( CPUMCTX, XState.x87.Rsrvd2),260 SSMFIELD_ENTRY( CPUMCTX, XState.x87.MXCSR),261 SSMFIELD_ENTRY( CPUMCTX, XState.x87.MXCSR_MASK),262 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[0]),263 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[1]),264 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[2]),265 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[3]),266 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[4]),267 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[5]),268 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[6]),269 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[7]),270 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[0]),271 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[1]),272 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[2]),273 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[3]),274 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[4]),275 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[5]),276 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[6]),277 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[7]),278 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[8]),279 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[9]),280 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[10]),281 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[11]),282 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[12]),283 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[13]),284 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[14]),285 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[15]),286 SSMFIELD_ENTRY_IGNORE( CPUMCTX, XState.x87.au32RsrvdRest),287 300 SSMFIELD_ENTRY( CPUMCTX, rdi), 288 301 SSMFIELD_ENTRY( CPUMCTX, rsi), … … 376 389 377 390 /** Saved state field descriptors for CPUMCTX_VER1_6. 
*/ 391 static const SSMFIELD g_aCpumX87FieldsV16[] = 392 { 393 SSMFIELD_ENTRY( X86FXSTATE, FCW), 394 SSMFIELD_ENTRY( X86FXSTATE, FSW), 395 SSMFIELD_ENTRY( X86FXSTATE, FTW), 396 SSMFIELD_ENTRY( X86FXSTATE, FOP), 397 SSMFIELD_ENTRY( X86FXSTATE, FPUIP), 398 SSMFIELD_ENTRY( X86FXSTATE, CS), 399 SSMFIELD_ENTRY( X86FXSTATE, Rsrvd1), 400 SSMFIELD_ENTRY( X86FXSTATE, FPUDP), 401 SSMFIELD_ENTRY( X86FXSTATE, DS), 402 SSMFIELD_ENTRY( X86FXSTATE, Rsrvd2), 403 SSMFIELD_ENTRY( X86FXSTATE, MXCSR), 404 SSMFIELD_ENTRY( X86FXSTATE, MXCSR_MASK), 405 SSMFIELD_ENTRY( X86FXSTATE, aRegs[0]), 406 SSMFIELD_ENTRY( X86FXSTATE, aRegs[1]), 407 SSMFIELD_ENTRY( X86FXSTATE, aRegs[2]), 408 SSMFIELD_ENTRY( X86FXSTATE, aRegs[3]), 409 SSMFIELD_ENTRY( X86FXSTATE, aRegs[4]), 410 SSMFIELD_ENTRY( X86FXSTATE, aRegs[5]), 411 SSMFIELD_ENTRY( X86FXSTATE, aRegs[6]), 412 SSMFIELD_ENTRY( X86FXSTATE, aRegs[7]), 413 SSMFIELD_ENTRY( X86FXSTATE, aXMM[0]), 414 SSMFIELD_ENTRY( X86FXSTATE, aXMM[1]), 415 SSMFIELD_ENTRY( X86FXSTATE, aXMM[2]), 416 SSMFIELD_ENTRY( X86FXSTATE, aXMM[3]), 417 SSMFIELD_ENTRY( X86FXSTATE, aXMM[4]), 418 SSMFIELD_ENTRY( X86FXSTATE, aXMM[5]), 419 SSMFIELD_ENTRY( X86FXSTATE, aXMM[6]), 420 SSMFIELD_ENTRY( X86FXSTATE, aXMM[7]), 421 SSMFIELD_ENTRY( X86FXSTATE, aXMM[8]), 422 SSMFIELD_ENTRY( X86FXSTATE, aXMM[9]), 423 SSMFIELD_ENTRY( X86FXSTATE, aXMM[10]), 424 SSMFIELD_ENTRY( X86FXSTATE, aXMM[11]), 425 SSMFIELD_ENTRY( X86FXSTATE, aXMM[12]), 426 SSMFIELD_ENTRY( X86FXSTATE, aXMM[13]), 427 SSMFIELD_ENTRY( X86FXSTATE, aXMM[14]), 428 SSMFIELD_ENTRY( X86FXSTATE, aXMM[15]), 429 SSMFIELD_ENTRY_IGNORE( X86FXSTATE, au32RsrvdRest), 430 SSMFIELD_ENTRY_TERM() 431 }; 432 433 /** Saved state field descriptors for CPUMCTX_VER1_6. */ 378 434 static const SSMFIELD g_aCpumCtxFieldsV16[] = 379 435 { 380 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FCW),381 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FSW),382 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FTW),383 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FOP),384 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FPUIP),385 SSMFIELD_ENTRY( CPUMCTX, XState.x87.CS),386 SSMFIELD_ENTRY( CPUMCTX, XState.x87.Rsrvd1),387 SSMFIELD_ENTRY( CPUMCTX, XState.x87.FPUDP),388 SSMFIELD_ENTRY( CPUMCTX, XState.x87.DS),389 SSMFIELD_ENTRY( CPUMCTX, XState.x87.Rsrvd2),390 SSMFIELD_ENTRY( CPUMCTX, XState.x87.MXCSR),391 SSMFIELD_ENTRY( CPUMCTX, XState.x87.MXCSR_MASK),392 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[0]),393 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[1]),394 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[2]),395 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[3]),396 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[4]),397 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[5]),398 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[6]),399 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aRegs[7]),400 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[0]),401 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[1]),402 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[2]),403 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[3]),404 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[4]),405 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[5]),406 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[6]),407 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[7]),408 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[8]),409 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[9]),410 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[10]),411 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[11]),412 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[12]),413 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[13]),414 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[14]),415 SSMFIELD_ENTRY( CPUMCTX, XState.x87.aXMM[15]),416 SSMFIELD_ENTRY_IGNORE( CPUMCTX, 
XState.x87.au32RsrvdRest),417 436 SSMFIELD_ENTRY( CPUMCTX, rdi), 418 437 SSMFIELD_ENTRY( CPUMCTX, rsi), … … 574 593 #endif 575 594 595 /* 596 * Initialize offsets. 597 */ 598 576 599 /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */ 577 600 pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum); … … 640 663 AssertLogRelRCReturn(rc, rc); 641 664 pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor; 665 666 /* 667 * Allocate memory for the extended CPU state. 668 */ 669 uint32_t cbMaxXState = sizeof(X86FXSTATE); 670 cbMaxXState = RT_ALIGN(cbMaxXState, 128); 671 uint8_t *pbXStates; 672 rc = MMR3HyperAllocOnceNoRelEx(pVM, cbMaxXState * 3 * pVM->cCpus, PAGE_SIZE, MM_TAG_CPUM_CTX, 673 MMHYPER_AONR_FLAGS_KERNEL_MAPPING, (void **)&pbXStates); 674 AssertLogRelRCReturn(rc, rc); 675 676 for (VMCPUID i = 0; i < pVM->cCpus; i++) 677 { 678 PVMCPU pVCpu = &pVM->aCpus[i]; 679 680 pVCpu->cpum.s.Guest.pXStateR3 = (PX86XSAVEAREA)pbXStates; 681 pVCpu->cpum.s.Guest.pXStateR0 = MMHyperR3ToR0(pVM, pbXStates); 682 pVCpu->cpum.s.Guest.pXStateRC = MMHyperR3ToR0(pVM, pbXStates); 683 pbXStates += cbMaxXState; 684 685 pVCpu->cpum.s.Host.pXStateR3 = (PX86XSAVEAREA)pbXStates; 686 pVCpu->cpum.s.Host.pXStateR0 = MMHyperR3ToR0(pVM, pbXStates); 687 pVCpu->cpum.s.Host.pXStateRC = MMHyperR3ToR0(pVM, pbXStates); 688 pbXStates += cbMaxXState; 689 690 pVCpu->cpum.s.Hyper.pXStateR3 = (PX86XSAVEAREA)pbXStates; 691 pVCpu->cpum.s.Hyper.pXStateR0 = MMHyperR3ToR0(pVM, pbXStates); 692 pVCpu->cpum.s.Hyper.pXStateRC = MMHyperR3ToR0(pVM, pbXStates); 693 pbXStates += cbMaxXState; 694 } 642 695 643 696 /* … … 701 754 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3); 702 755 703 /* Recheck the guest DRx values in raw-mode. */704 756 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++) 705 CPUMRecalcHyperDRx(&pVM->aCpus[iCpu], UINT8_MAX, false); 757 { 758 PVMCPU pVCpu = &pVM->aCpus[iCpu]; 759 pVCpu->cpum.s.Guest.pXStateRC = MMHyperR3ToRC(pVM, pVCpu->cpum.s.Guest.pXStateR3); 760 pVCpu->cpum.s.Host.pXStateRC = MMHyperR3ToRC(pVM, pVCpu->cpum.s.Host.pXStateR3); 761 pVCpu->cpum.s.Hyper.pXStateRC = MMHyperR3ToRC(pVM, pVCpu->cpum.s.Hyper.pXStateR3); /** @todo remove me */ 762 763 /* Recheck the guest DRx values in raw-mode. */ 764 CPUMRecalcHyperDRx(pVCpu, UINT8_MAX, false); 765 } 706 766 } 707 767 … … 777 837 * Initialize everything to ZERO first. 778 838 */ 779 uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM; 780 memset(pCtx, 0, sizeof(*pCtx)); 781 pVCpu->cpum.s.fUseFlags = fUseFlags; 839 uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM; 840 841 AssertCompile(RT_OFFSETOF(CPUMCTX, pXStateR0) < RT_OFFSETOF(CPUMCTX, pXStateR3)); 842 AssertCompile(RT_OFFSETOF(CPUMCTX, pXStateR0) < RT_OFFSETOF(CPUMCTX, pXStateRC)); 843 memset(pCtx, 0, RT_OFFSETOF(CPUMCTX, pXStateR0)); 844 845 pVCpu->cpum.s.fUseFlags = fUseFlags; 782 846 783 847 pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010 … … 841 905 pCtx->dr[7] = X86_DR7_INIT_VAL; 842 906 843 pCtx->XState.x87.FTW = 0x00; /* All empty (abbridged tag reg edition). */ 844 pCtx->XState.x87.FCW = 0x37f; 907 PX86FXSTATE pFpuCtx = &pCtx->pXStateR3->x87; AssertReleaseMsg(RT_VALID_PTR(pFpuCtx), ("%p\n", pFpuCtx)); 908 pFpuCtx->FTW = 0x00; /* All empty (abbridged tag reg edition). */ 909 pFpuCtx->FCW = 0x37f; 845 910 846 911 /* Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A, Table 8-1. 
847 912 IA-32 Processor States Following Power-up, Reset, or INIT */ 848 p Ctx->XState.x87.MXCSR= 0x1F80;849 p Ctx->XState.x87.MXCSR_MASK= 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really913 pFpuCtx->MXCSR = 0x1F80; 914 pFpuCtx->MXCSR_MASK = 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really 850 915 supports all bits, since a zero value here should be read as 0xffbf. */ 851 916 … … 1021 1086 1022 1087 uint32_t const fLoad = uVersion > CPUM_SAVED_STATE_VERSION_MEM ? 0 : SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED; 1023 PCSSMFIELD paCpumCtxFields = g_aCpumCtxFields; 1088 PCSSMFIELD paCpumCtx1Fields = g_aCpumX87Fields; 1089 PCSSMFIELD paCpumCtx2Fields = g_aCpumCtxFields; 1024 1090 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6) 1025 paCpumCtxFields = g_aCpumCtxFieldsV16; 1091 { 1092 paCpumCtx1Fields = g_aCpumX87FieldsV16; 1093 paCpumCtx2Fields = g_aCpumCtxFieldsV16; 1094 } 1026 1095 else if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM) 1027 paCpumCtxFields = g_aCpumCtxFieldsMem; 1096 { 1097 paCpumCtx1Fields = g_aCpumX87FieldsMem; 1098 paCpumCtx2Fields = g_aCpumCtxFieldsMem; 1099 } 1028 1100 1029 1101 /* … … 1035 1107 uint64_t uCR3 = pVCpu->cpum.s.Hyper.cr3; 1036 1108 uint64_t uRSP = pVCpu->cpum.s.Hyper.rsp; /* see VMMR3Relocate(). */ 1037 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), fLoad, paCpumCtxFields, NULL); 1109 /** @todo drop the FPU bits here! */ 1110 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Hyper.pXStateR3->x87, sizeof(pVCpu->cpum.s.Hyper.pXStateR3->x87), 1111 fLoad | SSMSTRUCT_FLAGS_NO_TAIL_MARKER, paCpumCtx1Fields, NULL); 1112 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), 1113 fLoad | SSMSTRUCT_FLAGS_NO_LEAD_MARKER, paCpumCtx2Fields, NULL); 1038 1114 pVCpu->cpum.s.Hyper.cr3 = uCR3; 1039 1115 pVCpu->cpum.s.Hyper.rsp = uRSP; … … 1065 1141 { 1066 1142 PVMCPU pVCpu = &pVM->aCpus[iCpu]; 1067 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), fLoad, 1068 paCpumCtxFields, NULL); 1143 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Guest.pXStateR3->x87, sizeof(pVCpu->cpum.s.Guest.pXStateR3->x87), 1144 fLoad | SSMSTRUCT_FLAGS_NO_TAIL_MARKER, paCpumCtx1Fields, NULL); 1145 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), 1146 fLoad | SSMSTRUCT_FLAGS_NO_LEAD_MARKER, paCpumCtx2Fields, NULL); 1069 1147 SSMR3GetU32(pSSM, &pVCpu->cpum.s.fUseFlags); 1070 1148 SSMR3GetU32(pSSM, &pVCpu->cpum.s.fChanged); … … 1516 1594 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp); 1517 1595 1596 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 1518 1597 pHlp->pfnPrintf(pHlp, 1519 1598 "%sFCW=%04x %sFSW=%04x %sFTW=%04x %sFOP=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n" 1520 1599 "%sFPUIP=%08x %sCS=%04x %sRsrvd1=%04x %sFPUDP=%08x %sDS=%04x %sRsvrd2=%04x\n" 1521 1600 , 1522 pszPrefix, p Ctx->XState.x87.FCW, pszPrefix, pCtx->XState.x87.FSW, pszPrefix, pCtx->XState.x87.FTW, pszPrefix, pCtx->XState.x87.FOP,1523 pszPrefix, p Ctx->XState.x87.MXCSR, pszPrefix, pCtx->XState.x87.MXCSR_MASK,1524 pszPrefix, p Ctx->XState.x87.FPUIP, pszPrefix, pCtx->XState.x87.CS, pszPrefix, pCtx->XState.x87.Rsrvd1,1525 pszPrefix, p Ctx->XState.x87.FPUDP, pszPrefix, pCtx->XState.x87.DS, pszPrefix, pCtx->XState.x87.Rsrvd21601 pszPrefix, pFpuCtx->FCW, pszPrefix, pFpuCtx->FSW, pszPrefix, pFpuCtx->FTW, pszPrefix, pFpuCtx->FOP, 1602 pszPrefix, pFpuCtx->MXCSR, pszPrefix, pFpuCtx->MXCSR_MASK, 1603 pszPrefix, pFpuCtx->FPUIP, pszPrefix, pFpuCtx->CS, pszPrefix, pFpuCtx->Rsrvd1, 
1604 pszPrefix, pFpuCtx->FPUDP, pszPrefix, pFpuCtx->DS, pszPrefix, pFpuCtx->Rsrvd2 1526 1605 ); 1527 unsigned iShift = (p Ctx->XState.x87.FSW >> 11) & 7;1528 for (unsigned iST = 0; iST < RT_ELEMENTS(p Ctx->XState.x87.aRegs); iST++)1606 unsigned iShift = (pFpuCtx->FSW >> 11) & 7; 1607 for (unsigned iST = 0; iST < RT_ELEMENTS(pFpuCtx->aRegs); iST++) 1529 1608 { 1530 unsigned iFPR = (iST + iShift) % RT_ELEMENTS(p Ctx->XState.x87.aRegs);1531 unsigned uTag = p Ctx->XState.x87.FTW & (1 << iFPR) ? 1 : 0;1532 char chSign = p Ctx->XState.x87.aRegs[0].au16[4] & 0x8000 ? '-' : '+';1533 unsigned iInteger = (unsigned)(p Ctx->XState.x87.aRegs[0].au64[0] >> 63);1534 uint64_t u64Fraction = p Ctx->XState.x87.aRegs[0].au64[0] & UINT64_C(0x7fffffffffffffff);1535 unsigned uExponent = p Ctx->XState.x87.aRegs[0].au16[4] & 0x7fff;1609 unsigned iFPR = (iST + iShift) % RT_ELEMENTS(pFpuCtx->aRegs); 1610 unsigned uTag = pFpuCtx->FTW & (1 << iFPR) ? 1 : 0; 1611 char chSign = pFpuCtx->aRegs[0].au16[4] & 0x8000 ? '-' : '+'; 1612 unsigned iInteger = (unsigned)(pFpuCtx->aRegs[0].au64[0] >> 63); 1613 uint64_t u64Fraction = pFpuCtx->aRegs[0].au64[0] & UINT64_C(0x7fffffffffffffff); 1614 unsigned uExponent = pFpuCtx->aRegs[0].au16[4] & 0x7fff; 1536 1615 /** @todo This isn't entirenly correct and needs more work! */ 1537 1616 pHlp->pfnPrintf(pHlp, 1538 1617 "%sST(%u)=%sFPR%u={%04RX16'%08RX32'%08RX32} t%d %c%u.%022llu ^ %u", 1539 1618 pszPrefix, iST, pszPrefix, iFPR, 1540 p Ctx->XState.x87.aRegs[0].au16[4], pCtx->XState.x87.aRegs[0].au32[1], pCtx->XState.x87.aRegs[0].au32[0],1619 pFpuCtx->aRegs[0].au16[4], pFpuCtx->aRegs[0].au32[1], pFpuCtx->aRegs[0].au32[0], 1541 1620 uTag, chSign, iInteger, u64Fraction, uExponent); 1542 if (p Ctx->XState.x87.aRegs[0].au16[5] || pCtx->XState.x87.aRegs[0].au16[6] || pCtx->XState.x87.aRegs[0].au16[7])1621 if (pFpuCtx->aRegs[0].au16[5] || pFpuCtx->aRegs[0].au16[6] || pFpuCtx->aRegs[0].au16[7]) 1543 1622 pHlp->pfnPrintf(pHlp, " res={%04RX16,%04RX16,%04RX16}\n", 1544 p Ctx->XState.x87.aRegs[0].au16[5], pCtx->XState.x87.aRegs[0].au16[6], pCtx->XState.x87.aRegs[0].au16[7]);1623 pFpuCtx->aRegs[0].au16[5], pFpuCtx->aRegs[0].au16[6], pFpuCtx->aRegs[0].au16[7]); 1545 1624 else 1546 1625 pHlp->pfnPrintf(pHlp, "\n"); 1547 1626 } 1548 for (unsigned iXMM = 0; iXMM < RT_ELEMENTS(p Ctx->XState.x87.aXMM); iXMM++)1627 for (unsigned iXMM = 0; iXMM < RT_ELEMENTS(pFpuCtx->aXMM); iXMM++) 1549 1628 pHlp->pfnPrintf(pHlp, 1550 1629 iXMM & 1 … … 1552 1631 : "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32 ", 1553 1632 pszPrefix, iXMM, iXMM < 10 ? " " : "", 1554 p Ctx->XState.x87.aXMM[iXMM].au32[3],1555 p Ctx->XState.x87.aXMM[iXMM].au32[2],1556 p Ctx->XState.x87.aXMM[iXMM].au32[1],1557 p Ctx->XState.x87.aXMM[iXMM].au32[0]);1558 for (unsigned i = 0; i < RT_ELEMENTS(p Ctx->XState.x87.au32RsrvdRest); i++)1559 if (p Ctx->XState.x87.au32RsrvdRest[i])1633 pFpuCtx->aXMM[iXMM].au32[3], 1634 pFpuCtx->aXMM[iXMM].au32[2], 1635 pFpuCtx->aXMM[iXMM].au32[1], 1636 pFpuCtx->aXMM[iXMM].au32[0]); 1637 for (unsigned i = 0; i < RT_ELEMENTS(pFpuCtx->au32RsrvdRest); i++) 1638 if (pFpuCtx->au32RsrvdRest[i]) 1560 1639 pHlp->pfnPrintf(pHlp, "%sRsrvdRest[i]=%RX32 (offset=%#x)\n", 1561 pszPrefix, i, p Ctx->XState.x87.au32RsrvdRest[i], RT_OFFSETOF(X86FXSTATE, au32RsrvdRest[i]) );1640 pszPrefix, i, pFpuCtx->au32RsrvdRest[i], RT_OFFSETOF(X86FXSTATE, au32RsrvdRest[i]) ); 1562 1641 1563 1642 pHlp->pfnPrintf(pHlp, -
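Reviewer note: CPUMR3Init now carves one MMR3HyperAllocOnceNoRelEx block into three XSAVE areas per VCPU (guest, host, hypervisor) and records each area under its ring-3, ring-0 and raw-mode address; as the hunk above shows, pXStateRC is first filled with the MMHyperR3ToR0 translation and only corrected to MMHyperR3ToRC later in the relocation path. A hedged sketch of the allocation loop; AllocXStates, ToRing0 and ToRawMode are stand-ins for the MM APIs, not actual VirtualBox functions.

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct XSaveArea { uint8_t ab[512]; } XSaveArea;

    typedef struct Context {
        XSaveArea *pXStateR3;                       /* ring-3 address   */
        uint64_t   pXStateR0;                       /* ring-0 address   */
        uint32_t   pXStateRC;                       /* raw-mode address */
    } Context;

    /* Identity stubs for the ring-0 / raw-mode address translators. */
    static uint64_t ToRing0(void *pv)   { return (uint64_t)(uintptr_t)pv; }
    static uint32_t ToRawMode(void *pv) { return (uint32_t)(uintptr_t)pv; }

    static int AllocXStates(Context *paCtx, unsigned cCtx, size_t cbXState)
    {
        /* One page-aligned block for all areas (stand-in for MMR3HyperAllocOnceNoRelEx);
           cbXState is assumed to be a multiple of the page size here. */
        uint8_t *pb = aligned_alloc(4096, cbXState * cCtx);
        if (!pb)
            return -1;
        for (unsigned i = 0; i < cCtx; i++, pb += cbXState)
        {
            paCtx[i].pXStateR3 = (XSaveArea *)pb;
            paCtx[i].pXStateR0 = ToRing0(pb);
            paCtx[i].pXStateRC = ToRawMode(pb);     /* fixed up again at relocation time */
        }
        return 0;
    }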
trunk/src/VBox/VMM/VMMR3/CPUMDbg.cpp
r54898 r55048 30 30 #include <VBox/log.h> 31 31 #include <iprt/thread.h> 32 #include <iprt/string.h> 32 33 #include <iprt/uint128.h> 33 34 … … 57 58 58 59 /** 59 * @interface_method_impl{DBGFREGDESC, pfn Get}60 * @interface_method_impl{DBGFREGDESC, pfnSet} 60 61 */ 61 62 static DECLCALLBACK(int) cpumR3RegSet_Generic(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask) … … 100 101 } 101 102 } 103 104 105 /** 106 * @interface_method_impl{DBGFREGDESC, pfnGet} 107 */ 108 static DECLCALLBACK(int) cpumR3RegGet_XStateGeneric(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue) 109 { 110 PVMCPU pVCpu = (PVMCPU)pvUser; 111 void const *pv = (uint8_t const *)&pVCpu->cpum.s.Guest.pXStateR3 + pDesc->offRegister; 112 113 VMCPU_ASSERT_EMT(pVCpu); 114 115 switch (pDesc->enmType) 116 { 117 case DBGFREGVALTYPE_U8: pValue->u8 = *(uint8_t const *)pv; return VINF_SUCCESS; 118 case DBGFREGVALTYPE_U16: pValue->u16 = *(uint16_t const *)pv; return VINF_SUCCESS; 119 case DBGFREGVALTYPE_U32: pValue->u32 = *(uint32_t const *)pv; return VINF_SUCCESS; 120 case DBGFREGVALTYPE_U64: pValue->u64 = *(uint64_t const *)pv; return VINF_SUCCESS; 121 case DBGFREGVALTYPE_U128: pValue->u128 = *(PCRTUINT128U )pv; return VINF_SUCCESS; 122 default: 123 AssertMsgFailedReturn(("%d %s\n", pDesc->enmType, pDesc->pszName), VERR_IPE_NOT_REACHED_DEFAULT_CASE); 124 } 125 } 126 127 128 /** 129 * @interface_method_impl{DBGFREGDESC, pfnSet} 130 */ 131 static DECLCALLBACK(int) cpumR3RegSet_XStateGeneric(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask) 132 { 133 PVMCPU pVCpu = (PVMCPU)pvUser; 134 void *pv = (uint8_t *)&pVCpu->cpum.s.Guest.pXStateR3 + pDesc->offRegister; 135 136 VMCPU_ASSERT_EMT(pVCpu); 137 138 switch (pDesc->enmType) 139 { 140 case DBGFREGVALTYPE_U8: 141 *(uint8_t *)pv &= ~pfMask->u8; 142 *(uint8_t *)pv |= pValue->u8 & pfMask->u8; 143 return VINF_SUCCESS; 144 145 case DBGFREGVALTYPE_U16: 146 *(uint16_t *)pv &= ~pfMask->u16; 147 *(uint16_t *)pv |= pValue->u16 & pfMask->u16; 148 return VINF_SUCCESS; 149 150 case DBGFREGVALTYPE_U32: 151 *(uint32_t *)pv &= ~pfMask->u32; 152 *(uint32_t *)pv |= pValue->u32 & pfMask->u32; 153 return VINF_SUCCESS; 154 155 case DBGFREGVALTYPE_U64: 156 *(uint64_t *)pv &= ~pfMask->u64; 157 *(uint64_t *)pv |= pValue->u64 & pfMask->u64; 158 return VINF_SUCCESS; 159 160 case DBGFREGVALTYPE_U128: 161 { 162 RTUINT128U Val; 163 RTUInt128AssignAnd((PRTUINT128U)pv, RTUInt128AssignBitwiseNot(RTUInt128Assign(&Val, &pfMask->u128))); 164 RTUInt128AssignOr((PRTUINT128U)pv, RTUInt128AssignAnd(RTUInt128Assign(&Val, &pValue->u128), &pfMask->u128)); 165 return VINF_SUCCESS; 166 } 167 168 default: 169 AssertMsgFailedReturn(("%d %s\n", pDesc->enmType, pDesc->pszName), VERR_IPE_NOT_REACHED_DEFAULT_CASE); 170 } 171 } 172 102 173 103 174 … … 251 322 */ 252 323 static DECLCALLBACK(int) cpumR3RegSet_ftw(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask) 324 { 325 NOREF(pvUser); NOREF(pDesc); NOREF(pValue); NOREF(pfMask); 326 return VERR_DBGF_READ_ONLY_REGISTER; 327 } 328 329 330 /** 331 * @interface_method_impl{DBGFREGDESC, pfnGet} 332 */ 333 static DECLCALLBACK(int) cpumR3RegGet_Dummy(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue) 334 { 335 switch (pDesc->enmType) 336 { 337 case DBGFREGVALTYPE_U8: pValue->u8 = 0; return VINF_SUCCESS; 338 case DBGFREGVALTYPE_U16: pValue->u16 = 0; return VINF_SUCCESS; 339 case DBGFREGVALTYPE_U32: pValue->u32 = 0; return VINF_SUCCESS; 340 case DBGFREGVALTYPE_U64: pValue->u64 = 0; return VINF_SUCCESS; 341 case 
DBGFREGVALTYPE_U128: 342 RT_ZERO(pValue->u128); 343 return VINF_SUCCESS; 344 case DBGFREGVALTYPE_DTR: 345 pValue->dtr.u32Limit = 0; 346 pValue->dtr.u64Base = 0; 347 return VINF_SUCCESS; 348 case DBGFREGVALTYPE_R80: 349 RT_ZERO(pValue->r80Ex); 350 return VINF_SUCCESS; 351 default: 352 AssertMsgFailedReturn(("%d %s\n", pDesc->enmType, pDesc->pszName), VERR_IPE_NOT_REACHED_DEFAULT_CASE); 353 } 354 } 355 356 357 /** 358 * @interface_method_impl{DBGFREGDESC, pfnSet} 359 */ 360 static DECLCALLBACK(int) cpumR3RegSet_Dummy(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask) 253 361 { 254 362 NOREF(pvUser); NOREF(pDesc); NOREF(pValue); NOREF(pfMask); … … 511 619 Assert(pDesc->enmType == DBGFREGVALTYPE_R80); 512 620 621 PX86FXSTATE pFpuCtx = &pVCpu->cpum.s.Guest.CTX_SUFF(pXState)->x87; 513 622 if (cpumR3RegIsFxSaveFormat(pVCpu)) 514 623 { 515 unsigned iReg = (p VCpu->cpum.s.Guest.XState.x87.FSW >> 11) & 7;624 unsigned iReg = (pFpuCtx->FSW >> 11) & 7; 516 625 iReg += pDesc->offRegister; 517 626 iReg &= 7; 518 pValue->r80Ex = p VCpu->cpum.s.Guest.XState.x87.aRegs[iReg].r80Ex;627 pValue->r80Ex = pFpuCtx->aRegs[iReg].r80Ex; 519 628 } 520 629 else 521 630 { 522 PCX86FPUSTATE pOldFpu = (PCX86FPUSTATE)&pVCpu->cpum.s.Guest.XState.x87;523 524 unsigned iReg = (pOldFpu ->FSW >> 11) & 7;631 PCX86FPUSTATE pOldFpuCtx = (PCX86FPUSTATE)pFpuCtx; 632 633 unsigned iReg = (pOldFpuCtx->FSW >> 11) & 7; 525 634 iReg += pDesc->offRegister; 526 635 iReg &= 7; 527 528 pValue->r80Ex = pOldFpu->regs[iReg].r80Ex; 636 pValue->r80Ex = pOldFpuCtx->regs[iReg].r80Ex; 529 637 } 530 638 … … 663 771 return VERR_ACCESS_DENIED; 664 772 } 665 666 667 /**668 * @interface_method_impl{DBGFREGDESC, pfnGet}669 */670 static DECLCALLBACK(int) cpumR3RegHyperGet_stN(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)671 {672 PVMCPU pVCpu = (PVMCPU)pvUser;673 674 VMCPU_ASSERT_EMT(pVCpu);675 Assert(pDesc->enmType == DBGFREGVALTYPE_R80);676 677 if (cpumR3RegIsFxSaveFormat(pVCpu))678 {679 unsigned iReg = (pVCpu->cpum.s.Guest.XState.x87.FSW >> 11) & 7;680 iReg += pDesc->offRegister;681 iReg &= 7;682 pValue->r80Ex = pVCpu->cpum.s.Guest.XState.x87.aRegs[iReg].r80Ex;683 }684 else685 {686 PCX86FPUSTATE pOldFpu = (PCX86FPUSTATE)&pVCpu->cpum.s.Guest.XState.x87;687 688 unsigned iReg = (pOldFpu->FSW >> 11) & 7;689 iReg += pDesc->offRegister;690 iReg &= 7;691 692 pValue->r80Ex = pOldFpu->regs[iReg].r80Ex;693 }694 695 return VINF_SUCCESS;696 }697 698 699 /**700 * @interface_method_impl{DBGFREGDESC, pfnGet}701 */702 static DECLCALLBACK(int) cpumR3RegHyperSet_stN(void *pvUser, PCDBGFREGDESC pDesc, PCDBGFREGVAL pValue, PCDBGFREGVAL pfMask)703 {704 /* There isn't a FPU context for the hypervisor yet, so no point in trying to set stuff. 
*/705 NOREF(pvUser); NOREF(pDesc); NOREF(pValue); NOREF(pfMask);706 return VERR_ACCESS_DENIED;707 }708 709 773 710 774 … … 1079 1143 1080 1144 #define CPU_REG_MM(n) \ 1081 CPU_REG_ RW_AS("mm" #n, MM##n, U64, XState.x87.aRegs[n].mmx, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_mmN)1145 CPU_REG_XS_RW_AS("mm" #n, MM##n, U64, x87.aRegs[n].mmx, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_mmN) 1082 1146 1083 1147 #define CPU_REG_XMM(n) \ 1084 CPU_REG_ RW_AS("xmm" #n, XMM##n, U128, XState.x87.aXMM[n].xmm, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_xmmN)1148 CPU_REG_XS_RW_AS("xmm" #n, XMM##n, U128, x87.aXMM[n].xmm, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_xmmN) 1085 1149 /** @} */ 1086 1150 … … 1095 1159 #define CPU_REG_RO_AS(a_szName, a_RegSuff, a_TypeSuff, a_CpumCtxMemb, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields) \ 1096 1160 { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, DBGFREG_FLAGS_READ_ONLY, RT_OFFSETOF(CPUMCPU, Guest.a_CpumCtxMemb), a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields } 1161 #define CPU_REG_XS_RW_AS(a_szName, a_RegSuff, a_TypeSuff, a_XStateMemb, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields) \ 1162 { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, 0 /*fFlags*/, RT_OFFSETOF(X86XSAVEAREA, a_XStateMemb), a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields } 1163 #define CPU_REG_XS_RO_AS(a_szName, a_RegSuff, a_TypeSuff, a_XStateMemb, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields) \ 1164 { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, DBGFREG_FLAGS_READ_ONLY, RT_OFFSETOF(X86XSAVEAREA, a_XStateMemb), a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields } 1097 1165 #define CPU_REG_MSR(a_szName, UName, a_TypeSuff, a_paSubFields) \ 1098 1166 CPU_REG_EX_AS(a_szName, MSR_##UName, a_TypeSuff, MSR_##UName, cpumR3RegGstGet_msr, cpumR3RegGstSet_msr, NULL, a_paSubFields) … … 1123 1191 CPU_REG_SEG(SS, ss), 1124 1192 CPU_REG_REG(RIP, rip), 1125 CPU_REG_RW_AS("rflags", RFLAGS, U64, rflags, cpumR3RegGet_Generic, cpumR3RegSet_Generic,g_aCpumRegAliases_rflags, g_aCpumRegFields_rflags ),1126 CPU_REG_ RW_AS("fcw", FCW, U16, XState.x87.FCW, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_fcw ),1127 CPU_REG_ RW_AS("fsw", FSW, U16, XState.x87.FSW, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_fsw ),1128 CPU_REG_ RO_AS("ftw", FTW, U16, XState.x87, cpumR3RegGet_ftw, cpumR3RegSet_ftw,NULL, g_aCpumRegFields_ftw ),1129 CPU_REG_ RW_AS("fop", FOP, U16, XState.x87.FOP, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),1130 CPU_REG_ RW_AS("fpuip", FPUIP, U32, XState.x87.FPUIP, cpumR3RegGet_Generic, cpumR3RegSet_Generic, g_aCpumRegAliases_fpuip, NULL ),1131 CPU_REG_ RW_AS("fpucs", FPUCS, U16, XState.x87.CS, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),1132 CPU_REG_ RW_AS("fpudp", FPUDP, U32, XState.x87.FPUDP, cpumR3RegGet_Generic, cpumR3RegSet_Generic, g_aCpumRegAliases_fpudp, NULL ),1133 CPU_REG_ RW_AS("fpuds", FPUDS, U16, XState.x87.DS, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ),1134 CPU_REG_ RW_AS("mxcsr", MXCSR, U32, XState.x87.MXCSR, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_mxcsr ),1135 CPU_REG_ RW_AS("mxcsr_mask", MXCSR_MASK, U32, XState.x87.MXCSR_MASK, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_mxcsr ),1193 CPU_REG_RW_AS("rflags", RFLAGS, U64, rflags, cpumR3RegGet_Generic, cpumR3RegSet_Generic, g_aCpumRegAliases_rflags, 
g_aCpumRegFields_rflags ), 1194 CPU_REG_XS_RW_AS("fcw", FCW, U16, x87.FCW, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_fcw ), 1195 CPU_REG_XS_RW_AS("fsw", FSW, U16, x87.FSW, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_fsw ), 1196 CPU_REG_XS_RO_AS("ftw", FTW, U16, x87, cpumR3RegGet_ftw, cpumR3RegSet_ftw, NULL, g_aCpumRegFields_ftw ), 1197 CPU_REG_XS_RW_AS("fop", FOP, U16, x87.FOP, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, NULL ), 1198 CPU_REG_XS_RW_AS("fpuip", FPUIP, U32, x87.FPUIP, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, g_aCpumRegAliases_fpuip, NULL ), 1199 CPU_REG_XS_RW_AS("fpucs", FPUCS, U16, x87.CS, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, NULL ), 1200 CPU_REG_XS_RW_AS("fpudp", FPUDP, U32, x87.FPUDP, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, g_aCpumRegAliases_fpudp, NULL ), 1201 CPU_REG_XS_RW_AS("fpuds", FPUDS, U16, x87.DS, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, NULL ), 1202 CPU_REG_XS_RW_AS("mxcsr", MXCSR, U32, x87.MXCSR, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_mxcsr ), 1203 CPU_REG_XS_RW_AS("mxcsr_mask", MXCSR_MASK, U32, x87.MXCSR_MASK, cpumR3RegGet_XStateGeneric, cpumR3RegSet_XStateGeneric, NULL, g_aCpumRegFields_mxcsr ), 1136 1204 CPU_REG_ST(0), 1137 1205 CPU_REG_ST(1), … … 1223 1291 #define CPU_REG_RO_AS(a_szName, a_RegSuff, a_TypeSuff, a_CpumCtxMemb, a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields) \ 1224 1292 { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, DBGFREG_FLAGS_READ_ONLY, RT_OFFSETOF(CPUMCPU, Hyper.a_CpumCtxMemb), a_pfnGet, a_pfnSet, a_paAliases, a_paSubFields } 1293 #define CPU_REG_DUMMY(a_szName, a_RegSuff, a_TypeSuff) \ 1294 { a_szName, DBGFREG_##a_RegSuff, DBGFREGVALTYPE_##a_TypeSuff, DBGFREG_FLAGS_READ_ONLY, 0, cpumR3RegGet_Dummy, cpumR3RegSet_Dummy, NULL, NULL} 1225 1295 #define CPU_REG_MSR(a_szName, UName, a_TypeSuff, a_paSubFields) \ 1226 1296 CPU_REG_EX_AS(a_szName, MSR_##UName, a_TypeSuff, MSR_##UName, cpumR3RegHyperGet_msr, cpumR3RegHyperSet_msr, NULL, a_paSubFields) 1227 #define CPU_REG_ST(n) \1228 CPU_REG_EX_AS("st" #n, ST##n, R80, n, cpumR3RegHyperGet_stN, cpumR3RegHyperSet_stN, NULL, g_aCpumRegFields_stN)1229 1297 1230 1298 CPU_REG_REG(RAX, rax), … … 1252 1320 CPU_REG_REG(RIP, rip), 1253 1321 CPU_REG_RW_AS("rflags", RFLAGS, U64, rflags, cpumR3RegGet_Generic, cpumR3RegSet_Generic, g_aCpumRegAliases_rflags, g_aCpumRegFields_rflags ), 1254 CPU_REG_ RW_AS("fcw", FCW, U16, XState.x87.FCW, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_fcw),1255 CPU_REG_ RW_AS("fsw", FSW, U16, XState.x87.FSW, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_fsw),1256 CPU_REG_ RO_AS("ftw", FTW, U16, XState.x87, cpumR3RegGet_ftw, cpumR3RegSet_ftw, NULL, g_aCpumRegFields_ftw),1257 CPU_REG_ RW_AS("fop", FOP, U16, XState.x87.FOP, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL),1258 CPU_REG_ RW_AS("fpuip", FPUIP, U32, XState.x87.FPUIP, cpumR3RegGet_Generic, cpumR3RegSet_Generic, g_aCpumRegAliases_fpuip, NULL),1259 CPU_REG_ RW_AS("fpucs", FPUCS, U16, XState.x87.CS, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL),1260 CPU_REG_ RW_AS("fpudp", FPUDP, U32, XState.x87.FPUDP, cpumR3RegGet_Generic, cpumR3RegSet_Generic, g_aCpumRegAliases_fpudp, NULL),1261 CPU_REG_ RW_AS("fpuds", FPUDS, U16, XState.x87.DS, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL),1262 CPU_REG_ RW_AS("mxcsr", MXCSR, U32, XState.x87.MXCSR, 
cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_mxcsr),1263 CPU_REG_ RW_AS("mxcsr_mask", MXCSR_MASK, U32, XState.x87.MXCSR_MASK, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, g_aCpumRegFields_mxcsr),1264 CPU_REG_ ST(0),1265 CPU_REG_ ST(1),1266 CPU_REG_ ST(2),1267 CPU_REG_ ST(3),1268 CPU_REG_ ST(4),1269 CPU_REG_ ST(5),1270 CPU_REG_ ST(6),1271 CPU_REG_ ST(7),1272 CPU_REG_ MM(0),1273 CPU_REG_ MM(1),1274 CPU_REG_ MM(2),1275 CPU_REG_ MM(3),1276 CPU_REG_ MM(4),1277 CPU_REG_ MM(5),1278 CPU_REG_ MM(6),1279 CPU_REG_ MM(7),1280 CPU_REG_ XMM(0),1281 CPU_REG_ XMM(1),1282 CPU_REG_ XMM(2),1283 CPU_REG_ XMM(3),1284 CPU_REG_ XMM(4),1285 CPU_REG_ XMM(5),1286 CPU_REG_ XMM(6),1287 CPU_REG_ XMM(7),1288 CPU_REG_ XMM(8),1289 CPU_REG_ XMM(9),1290 CPU_REG_ XMM(10),1291 CPU_REG_ XMM(11),1292 CPU_REG_ XMM(12),1293 CPU_REG_ XMM(13),1294 CPU_REG_ XMM(14),1295 CPU_REG_ XMM(15),1322 CPU_REG_DUMMY("fcw", FCW, U16), 1323 CPU_REG_DUMMY("fsw", FSW, U16), 1324 CPU_REG_DUMMY("ftw", FTW, U16), 1325 CPU_REG_DUMMY("fop", FOP, U16), 1326 CPU_REG_DUMMY("fpuip", FPUIP, U32), 1327 CPU_REG_DUMMY("fpucs", FPUCS, U16), 1328 CPU_REG_DUMMY("fpudp", FPUDP, U32), 1329 CPU_REG_DUMMY("fpuds", FPUDS, U16), 1330 CPU_REG_DUMMY("mxcsr", MXCSR, U32), 1331 CPU_REG_DUMMY("mxcsr_mask", MXCSR_MASK, U32), 1332 CPU_REG_DUMMY("st0", ST0, R80), 1333 CPU_REG_DUMMY("st1", ST1, R80), 1334 CPU_REG_DUMMY("st2", ST2, R80), 1335 CPU_REG_DUMMY("st3", ST3, R80), 1336 CPU_REG_DUMMY("st4", ST4, R80), 1337 CPU_REG_DUMMY("st5", ST5, R80), 1338 CPU_REG_DUMMY("st6", ST6, R80), 1339 CPU_REG_DUMMY("st7", ST7, R80), 1340 CPU_REG_DUMMY("mm0", MM0, U64), 1341 CPU_REG_DUMMY("mm1", MM1, U64), 1342 CPU_REG_DUMMY("mm2", MM2, U64), 1343 CPU_REG_DUMMY("mm3", MM3, U64), 1344 CPU_REG_DUMMY("mm4", MM4, U64), 1345 CPU_REG_DUMMY("mm5", MM5, U64), 1346 CPU_REG_DUMMY("mm6", MM6, U64), 1347 CPU_REG_DUMMY("mm7", MM7, U64), 1348 CPU_REG_DUMMY("xmm0", XMM0, U128), 1349 CPU_REG_DUMMY("xmm1", XMM1, U128), 1350 CPU_REG_DUMMY("xmm2", XMM2, U128), 1351 CPU_REG_DUMMY("xmm3", XMM3, U128), 1352 CPU_REG_DUMMY("xmm4", XMM4, U128), 1353 CPU_REG_DUMMY("xmm5", XMM5, U128), 1354 CPU_REG_DUMMY("xmm6", XMM6, U128), 1355 CPU_REG_DUMMY("xmm7", XMM7, U128), 1356 CPU_REG_DUMMY("xmm8", XMM8, U128), 1357 CPU_REG_DUMMY("xmm9", XMM9, U128), 1358 CPU_REG_DUMMY("xmm10", XMM10, U128), 1359 CPU_REG_DUMMY("xmm11", XMM11, U128), 1360 CPU_REG_DUMMY("xmm12", XMM12, U128), 1361 CPU_REG_DUMMY("xmm13", XMM13, U128), 1362 CPU_REG_DUMMY("xmm14", XMM14, U128), 1363 CPU_REG_DUMMY("xmm15", XMM15, U128), 1296 1364 CPU_REG_RW_AS("gdtr_base", GDTR_BASE, U64, gdtr.pGdt, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ), 1297 1365 CPU_REG_RW_AS("gdtr_lim", GDTR_LIMIT, U16, gdtr.cbGdt, cpumR3RegGet_Generic, cpumR3RegSet_Generic, NULL, NULL ), -
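Reviewer note: the debugger register descriptors now store offRegister relative to X86XSAVEAREA and resolve it through the per-VCPU XState pointer at access time; readers may want to double-check that the new getter adds the offset to the pointed-to area rather than to the address of the pointer member itself ((uint8_t const *)&pVCpu->cpum.s.Guest.pXStateR3 + ... in the hunk above). A simplified sketch of the intended lookup, with RegDesc and VCpu as illustrative types:

    #include <stdint.h>
    #include <string.h>

    typedef struct RegDesc { unsigned offRegister; unsigned cbValue; } RegDesc;
    typedef struct VCpu    { uint8_t *pXStateR3; } VCpu;

    static int GetXStateReg(const VCpu *pVCpu, const RegDesc *pDesc, void *pvDst)
    {
        /* offRegister is relative to the XSAVE area, not to the CPU context. */
        memcpy(pvDst, pVCpu->pXStateR3 + pDesc->offRegister, pDesc->cbValue);
        return 0;
    }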
trunk/src/VBox/VMM/VMMR3/SSM.cpp
r51598 r55048
6672 6672	 * Begin marker.
6673 6673	 */
6674	if (!(fFlags & SSMSTRUCT_FLAGS_NO_MARKERS))
6674	if (!(fFlags & (SSMSTRUCT_FLAGS_NO_MARKERS | SSMSTRUCT_FLAGS_NO_LEAD_MARKER)))
6675 6675	{
6676 6676	rc = SSMR3GetU32(pSSM, &u32Magic);
…
6904 6904	 * End marker
6905 6905	 */
6906	if (!(fFlags & SSMSTRUCT_FLAGS_NO_MARKERS))
6906	if (!(fFlags & (SSMSTRUCT_FLAGS_NO_MARKERS | SSMSTRUCT_FLAGS_NO_TAIL_MARKER)))
6907 6907	{
6908 6908	rc = SSMR3GetU32(pSSM, &u32Magic);
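Reviewer note: each new flag suppresses exactly one of the two structure markers, so a structure that older saved states stored as a single unit can now be restored with two back-to-back SSMR3GetStructEx calls (NO_TAIL_MARKER on the x87 part, NO_LEAD_MARKER on the remainder, as in the CPUM.cpp hunks above) while the stream still sees one begin and one end marker. A hedged sketch of the marker predicates; the bit positions for the two new flags match the diff, and NO_MARKERS is assumed to be bit 2 here.

    #include <stdint.h>
    #include <stdbool.h>

    #define SSMSTRUCT_FLAGS_NO_MARKERS      (UINT32_C(1) << 2)   /* assumed */
    #define SSMSTRUCT_FLAGS_NO_LEAD_MARKER  (UINT32_C(1) << 4)
    #define SSMSTRUCT_FLAGS_NO_TAIL_MARKER  (UINT32_C(1) << 5)

    static bool WantLeadMarker(uint32_t fFlags)
    {
        return !(fFlags & (SSMSTRUCT_FLAGS_NO_MARKERS | SSMSTRUCT_FLAGS_NO_LEAD_MARKER));
    }

    static bool WantTailMarker(uint32_t fFlags)
    {
        return !(fFlags & (SSMSTRUCT_FLAGS_NO_MARKERS | SSMSTRUCT_FLAGS_NO_TAIL_MARKER));
    }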
trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm
r55027 r55048 62 62 align 16 63 63 BEGINPROC cpumHandleLazyFPUAsm 64 push ebx 65 push esi 66 mov ebx, [esp + 4] 67 %define pCpumCpu ebx 68 %define pXState esi 69 64 70 ; 65 71 ; Figure out what to do. … … 92 98 ; loaded the GC FPU. Because if we have, this is an trap for the guest - raw ring-3. 93 99 ; 94 %ifdef RT_ARCH_AMD64 95 %ifdef RT_OS_WINDOWS 96 mov xDX, rcx 97 %else 98 mov xDX, rdi 99 %endif 100 %else 101 mov xDX, dword [esp + 4] 102 %endif 103 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU 100 test dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU 104 101 jz hlfpua_not_loaded 105 jmp hlfpua_ to_host102 jmp hlfpua_guest_trap 106 103 107 104 ; … … 110 107 align 16 111 108 hlfpua_not_loaded: 112 mov eax, [ xDX+ CPUMCPU.Guest.cr0]109 mov eax, [pCpumCpu + CPUMCPU.Guest.cr0] 113 110 and eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS 114 %ifdef RT_ARCH_AMD64115 lea r8, [hlfpuajmp1 wrt rip]116 jmp qword [rax*4 + r8]117 %else118 111 jmp dword [eax*2 + hlfpuajmp1] 119 %endif120 112 align 16 121 113 ;; jump table using fpu related cr0 flags as index. … … 126 118 RTCCPTR_DEF hlfpua_switch_fpu_ctx 127 119 RTCCPTR_DEF hlfpua_switch_fpu_ctx 128 RTCCPTR_DEF hlfpua_ to_host129 RTCCPTR_DEF hlfpua_switch_fpu_ctx 130 RTCCPTR_DEF hlfpua_ to_host120 RTCCPTR_DEF hlfpua_guest_trap 121 RTCCPTR_DEF hlfpua_switch_fpu_ctx 122 RTCCPTR_DEF hlfpua_guest_trap 131 123 ;; and mask for cr0. 132 124 hlfpu_afFlags: … … 145 137 align 16 146 138 hlfpua_switch_fpu_ctx: 147 ; Paranoia. This function was previously used in ring-0, not any longer. 148 %ifdef IN_RING3 149 %error "This function is not written for ring-3" 150 %endif 151 %ifdef IN_RING0 152 %error "This function is not written for ring-0" 153 %endif 154 155 mov xCX, cr0 156 %ifdef RT_ARCH_AMD64 157 lea r8, [hlfpu_afFlags wrt rip] 158 and rcx, [rax*4 + r8] ; calc the new cr0 flags. 159 %else 160 and ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags. 161 %endif 162 mov xAX, cr0 163 and xAX, ~(X86_CR0_TS | X86_CR0_EM) 164 mov cr0, xAX ; clear flags so we don't trap here. 165 %ifndef RT_ARCH_AMD64 166 mov eax, edx ; Calculate the PCPUM pointer 167 sub eax, [edx + CPUMCPU.offCPUM] 139 mov ecx, cr0 140 mov edx, ecx 141 and ecx, [eax*2 + hlfpu_afFlags] ; Calc the new cr0 flags. Do NOT use ECX until we restore it! 142 and edx, ~(X86_CR0_TS | X86_CR0_EM) 143 mov cr0, edx ; Clear flags so we don't trap here. 144 145 mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateRC] 146 mov eax, pCpumCpu ; Calculate the PCPUM pointer 147 sub eax, [pCpumCpu + CPUMCPU.offCPUM] 168 148 test dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR 169 149 jz short hlfpua_no_fxsave 170 %endif 171 172 %ifdef RT_ARCH_AMD64 173 ; Use explicit REX prefix. See @bugref{6398}. 174 o64 fxsave [xDX + CPUMCPU.Host.XState] 175 %else 176 fxsave [xDX + CPUMCPU.Host.XState] 177 %endif 178 or dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) 179 fxrstor [xDX + CPUMCPU.Guest.XState] ; raw-mode guest is always 32-bit. See @bugref{7138}. 150 151 fxsave [pXState] 152 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateRC] 153 fxrstor [pXState] 180 154 181 155 hlfpua_finished_switch: 156 or dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) 182 157 183 158 ; Load new CR0 value. 184 ;; @todo Optimize the many unconditional CR0 writes. 185 mov cr0, xCX ; load the new cr0 flags. 159 mov cr0, ecx ; load the new cr0 flags. 186 160 187 161 ; return continue execution. 
162 pop esi 163 pop ebx 188 164 xor eax, eax 189 165 ret 190 166 191 %ifndef RT_ARCH_AMD64 192 ; legacy support. 167 ; 168 ; Legacy CPU support. 169 ; 193 170 hlfpua_no_fxsave: 194 fnsave [ xDX + CPUMCPU.Host.XState]195 or dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm196 mov eax, [ xDX + CPUMCPU.Guest.XState]; control word197 not eax 198 and eax, byte 03Fh 199 test eax, [ xDX + CPUMCPU.Guest.XState + 4]; status word171 fnsave [pXState] 172 mov pXState, [pCpumCpu + CPUMCPU.Guest.pXStateRC] 173 mov eax, [pXState] ; control word 174 not eax ; 1 means exception ignored (6 LS bits) 175 and eax, byte 03Fh ; 6 LS bits only 176 test eax, [pXState + 4] ; status word 200 177 jz short hlfpua_no_exceptions_pending 201 ; technically incorrect, but we certainly don't want any exceptions now!!202 and dword [ xDX + CPUMCPU.Guest.XState + 4], ~03Fh178 ; Technically incorrect, but we certainly don't want any exceptions now!! 179 and dword [pXState + 4], ~03Fh 203 180 hlfpua_no_exceptions_pending: 204 frstor [ xDX + CPUMCPU.Guest.XState]181 frstor [pXState] 205 182 jmp near hlfpua_finished_switch 206 %endif ; !RT_ARCH_AMD64207 208 183 209 184 ; … … 211 186 ; 212 187 hlfpua_action_4: 213 hlfpua_to_host: 188 hlfpua_guest_trap: 189 pop esi 190 pop ebx 214 191 mov eax, VINF_EM_RAW_GUEST_TRAP 215 192 ret -
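Reviewer note: cpumHandleLazyFPUAsm keeps its jump-table dispatch on the guest's CR0.MP/EM/TS bits, but the save/restore now goes through Host.pXStateRC and Guest.pXStateRC instead of the embedded XState members, and the routine is now raw-mode only. A simplified C rendering of the decision logic, not the actual implementation (names, stubs and return codes are illustrative; the jump table's net effect modeled here is that a #NM with guest CR0.TS and CR0.MP both set belongs to the guest):

    #include <stdint.h>

    #define MY_CR0_MP   UINT32_C(0x00000002)
    #define MY_CR0_TS   UINT32_C(0x00000008)
    #define MY_USED_FPU UINT32_C(0x00000001)

    typedef struct FpuImage FpuImage;
    typedef struct CpumCpu {
        uint32_t  fUseFlags;       /* MY_USED_FPU once the guest state is loaded */
        uint32_t  uGuestCr0;
        FpuImage *pHostXState;     /* think Host.pXStateRC  */
        FpuImage *pGuestXState;    /* think Guest.pXStateRC */
    } CpumCpu;

    static void SaveFpu(FpuImage *pDst)       { (void)pDst; } /* fxsave/fnsave stub  */
    static void LoadFpu(const FpuImage *pSrc) { (void)pSrc; } /* fxrstor/frstor stub */

    enum { RESUME_GUEST = 0, GUEST_TRAP = 1 };

    static int HandleLazyFpu(CpumCpu *pCpu)
    {
        if (pCpu->fUseFlags & MY_USED_FPU)
            return GUEST_TRAP;            /* guest FPU already loaded: its own #NM */
        if ((pCpu->uGuestCr0 & (MY_CR0_TS | MY_CR0_MP)) == (MY_CR0_TS | MY_CR0_MP))
            return GUEST_TRAP;            /* guest asked for the fault itself */
        SaveFpu(pCpu->pHostXState);       /* host state out through Host.pXState...  */
        LoadFpu(pCpu->pGuestXState);      /* ...guest state in through Guest.pXState */
        pCpu->fUseFlags |= MY_USED_FPU;
        return RESUME_GUEST;
    }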
trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
r54898 r55048
1144 1144	mov cr0, rcx
1146	fxsave [rdx + r8 + CPUMCPU.Guest.XState]
1147	o64 fxrstor [rdx + r8 + CPUMCPU.Host.XState] ; Restore 64-bit host FPU state. See @bugref{7138}
1146 1148	mov rax, [rdx + r8 + CPUMCPU.Guest.pXStateR0]
1147 1149	fxsave [rax]
1150	mov rax, [rdx + r8 + CPUMCPU.Host.pXStateR0]
1151	fxrstor [rax] ; We saved 32-bit state, so only restore 32-bit.
1148 1152	jmp short gth_fpu_no
1149 1153
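Reviewer note: dropping the o64 prefix on the restore is deliberate — fxsave/fxrstor without REX.W use the 32-bit FPUIP/FPUDP image layout, so a restore must match the format of the preceding save (the new comment says as much; compare @bugref{6398}, where the opposite pairing was needed). A sketch of the header layouts the prefix selects, simplified from the FXSAVE image definition:

    #include <stdint.h>

    /* Without REX.W: segmented 32-bit FPU instruction/data pointers. */
    typedef struct FxHdr32 {
        uint32_t FPUIP; uint16_t CS, Rsrvd1;
        uint32_t FPUDP; uint16_t DS, Rsrvd2;
    } FxHdr32;

    /* With REX.W (o64): flat 64-bit FPU instruction/data pointers. */
    typedef struct FxHdr64 {
        uint64_t FPUIP;
        uint64_t FPUDP;
    } FxHdr64;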
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r54898 r55048
664 664	and rax, ~(X86_CR0_TS | X86_CR0_EM)
665 665	mov cr0, rax
666	; Use explicit REX prefix. See @bugref{6398}.
667	o64 fxrstor [rdx + CPUMCPU.Guest.XState]
666	mov eax, [rdx + CPUMCPU.Guest.pXStateRC]
667	o64 fxrstor [rax] ; (use explicit REX prefix, see @bugref{6398})
668 668	mov cr0, rcx ; and restore old CR0 again
669 669
…
725 725	; parameter for all helper functions (pCtx)
726 726	DEBUG64_CHAR('9')
727	lea rsi, [rdx + CPUMCPU.Guest.XState]
727	lea rsi, [rdx + CPUMCPU.Guest]
728 728	lea rax, [htg_return wrt rip]
729 729	push rax ; return address
…
1258 1258	mov cr0, rax
1260	; Use explicit REX prefix. See @bugref{6398}.
1261	o64 fxsave [rsi + CPUMCTX.XState]
1260	mov eax, [rsi + CPUMCTX.pXStateRC]
1261	o64 fxsave [rax] ; (use explicit REX prefix, see @bugref{6398})
1262 1262
1263 1263	mov cr0, rcx ; and restore old CR0 again
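Reviewer note: the 64-bit switcher now hands helper functions the whole guest context (lea rsi, [rdx + CPUMCPU.Guest]) rather than the embedded FPU area, and resolves pXStateRC itself before the o64 fxsave/fxrstor. A hedged sketch of the resulting calling convention, with illustrative types:

    #include <stdint.h>

    typedef struct XSaveArea { uint8_t ab[512]; } XSaveArea;
    typedef struct CpumCtx {
        /* ... register fields ... */
        XSaveArea *pXStateRC;      /* raw-mode address used by this switcher */
    } CpumCtx;

    /* Before: the helper received &pCtx->XState directly.
       After:  it receives the whole context and fetches the pointer itself. */
    static void HelperFxSave(CpumCtx *pCtx)
    {
        XSaveArea *pXState = pCtx->pXStateRC;
        /* ... o64 fxsave [pXState] in the real assembly ... */
        (void)pXState;
    }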
trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
r54898 r55048
990 990	mov cr0, ecx
991 991
992	mov eax, [edx + CPUMCPU.Guest.pXStateR0]
993	mov ecx, [edx + CPUMCPU.Host.pXStateR0]
992 994	FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
993	fxsave [edx + CPUMCPU.Guest.XState]
994	fxrstor [edx + CPUMCPU.Host.XState]
995	fxsave [eax]
996	fxrstor [ecx]
995 997	jmp near gth_fpu_no
996 998
997 999	gth_no_fxsave:
998	fnsave [edx + CPUMCPU.Guest.XState]
999	mov eax, [edx + CPUMCPU.Host.XState] ; control word
1000	not eax
1001	and eax, byte 03Fh
1002	test eax, [edx + CPUMCPU.Host.XState + 4] ; status word
1000	fnsave [eax]
1001	mov eax, [ecx] ; control word
1002	not eax ; 1 means exception ignored (6 LS bits)
1003	and eax, byte 03Fh ; 6 LS bits only
1004	test eax, [ecx + 4] ; status word
1003 1005	jz gth_no_exceptions_pending
1004 1006
1005 1007	; technically incorrect, but we certainly don't want any exceptions now!!
1006	and dword [edx + CPUMCPU.Host.XState + 4], ~03Fh
1008	and dword [ecx + 4], ~03Fh
1007 1009
1008 1010	gth_no_exceptions_pending:
1009	frstor [edx + CPUMCPU.Host.XState]
1011	frstor [ecx]
1010 1012	jmp short gth_fpu_no
1011 1013
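Reviewer note: on the legacy FNSAVE path the status word of the saved image is tested against the inverted control word, because an exception that is both pending and unmasked would fire the moment FRSTOR re-arms it; the code therefore clears the low six status bits first, which the comment itself calls technically incorrect. A C sketch of that guard, assuming the 32-bit protected-mode FNSAVE layout (control word at offset 0, status word at offset 4):

    #include <stdint.h>
    #include <string.h>

    /* pbImage points at an FNSAVE image: control word at +0, status word at +4. */
    static void SanitizeBeforeFrstor(uint8_t *pbImage)
    {
        uint32_t fcw, fsw;
        memcpy(&fcw, pbImage + 0, sizeof(fcw));
        memcpy(&fsw, pbImage + 4, sizeof(fsw));
        uint32_t fUnmasked = ~fcw & 0x3F;              /* 1 = exception not ignored */
        if (fUnmasked & fsw)                           /* pending and unmasked? */
        {
            fsw &= ~UINT32_C(0x3F);                    /* technically incorrect, but no #MF on FRSTOR */
            memcpy(pbImage + 4, &fsw, sizeof(fsw));
        }
    }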
trunk/src/VBox/VMM/include/CPUMInternal.h
r54898 r55048
286 286	typedef struct CPUMHOSTCTX
287 287	{
288	/** FPU state. (16-byte alignment)
289	 * @remark On x86, the format isn't necessarily X86FXSTATE (not important). */
290	X86XSAVEAREA XState;
291
292 288	/** General purpose register, selectors, flags and more
293 289	 * @{ */
…
389 385
390 386	/* padding to get 64byte aligned size */
391	uint8_t auPadding[16+32];
387	uint8_t auPadding[16+12];
392 388	#elif HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
…
435 431	/* padding to get 32byte aligned size */
436 432	# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
437	uint8_t auPadding[16];
433	uint8_t auPadding[4];
438 434	# else
439	uint8_t auPadding[8+32];
435	uint8_t auPadding[8+12];
440 436	# endif
441 437
…
443 439	# error HC_ARCH_BITS not defined
444 440	#endif
441
442	/** Pointer to the FPU/SSE/AVX/XXXX state raw-mode mapping. */
443	RCPTRTYPE(PX86XSAVEAREA) pXStateRC;
444	/** Pointer to the FPU/SSE/AVX/XXXX state ring-0 mapping. */
445	R0PTRTYPE(PX86XSAVEAREA) pXStateR0;
446	/** Pointer to the FPU/SSE/AVX/XXXX state ring-3 mapping. */
447	R3PTRTYPE(PX86XSAVEAREA) pXStateR3;
445 448	} CPUMHOSTCTX;
446 449	/** Pointer to the saved host CPU state. */
trunk/src/VBox/VMM/include/CPUMInternal.mac
r55004 r55048 126 126 ; (Identical to the .Hyper chunk below.) 127 127 ; 128 alignb 64 129 .Guest.XState resb XSTATE_SIZE 128 .Guest resq 0 130 129 .Guest.eax resq 1 131 130 .Guest.ecx resq 1 … … 226 225 .Guest.msrKERNELGSBASE resb 8 227 226 .Guest.msrApicBase resb 8 228 227 .Guest.pXStateR0 RTR0PTR_RES 1 228 .Guest.pXStateR3 RTR3PTR_RES 1 229 .Guest.pXStateRC RTRCPTR_RES 1 229 230 230 231 alignb 64 232 .GuestMsrs resq 0 231 233 .GuestMsrs.au64 resq 64 232 234 … … 256 258 ; 257 259 alignb 64 258 .Host.XState resb XSTATE_SIZE 259 260 .Host resb 0 260 261 %if HC_ARCH_BITS == 64 || fVBOX_WITH_HYBRID_32BIT_KERNEL 261 262 ;.Host.rax resq 1 - scratch … … 331 332 .Host.SysEnter.esp resq 1 332 333 .Host.efer resq 1 334 .Host.auPadding resb (16+12) 333 335 334 336 %else ; 64-bit … … 362 364 .Host.GSbase resq 1 363 365 .Host.efer resq 1 366 %if fVBOX_WITH_HYBRID_32BIT_KERNEL 367 .Host.auPadding resb 4 368 %else 369 .Host.auPadding resb (8+12) 370 %endif 364 371 %endif ; 64-bit 372 .Host.pXStateRC RTRCPTR_RES 1 373 alignb RTR0PTR_CB 374 .Host.pXStateR0 RTR0PTR_RES 1 375 .Host.pXStateR3 RTR3PTR_RES 1 365 376 366 377 ; … … 368 379 ; 369 380 alignb 64 370 .Hyper .XState resb XSTATE_SIZE381 .Hyper resq 0 371 382 .Hyper.eax resq 1 372 383 .Hyper.ecx resq 1 … … 467 478 .Hyper.msrKERNELGSBASE resb 8 468 479 .Hyper.msrApicBase resb 8 480 .Hyper.pXStateR0 RTR0PTR_RES 1 481 .Hyper.pXStateR3 RTR3PTR_RES 1 482 .Hyper.pXStateRC RTRCPTR_RES 1 469 483 alignb 64 470 484 -
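Reviewer note: every field added to CPUMCTX and CPUMHOSTCTX in the C headers is mirrored here by hand, so the .mac offsets must stay byte-for-byte in sync with the compiler's structure layout; the tree guards this with compile-time offset checks (tstVMStructSize and the AssertCompileMemberOffset family). A generic sketch of that guard pattern, with an illustrative struct:

    #include <stddef.h>

    struct Example
    {
        long  a;
        void *pXStateR0;
    };

    /* Fails to compile if pXStateR0 ever moves from the expected offset. */
    typedef char Example_layout_check[offsetof(struct Example, pXStateR0) == sizeof(long) ? 1 : -1];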
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r54898 r55048
63 63
64 64	GEN_CHECK_SIZE(CPUMHOSTCTX);
65	GEN_CHECK_OFF(CPUMHOSTCTX, XState);
65	GEN_CHECK_OFF(CPUMHOSTCTX, pXStateR3);
66	GEN_CHECK_OFF(CPUMHOSTCTX, pXStateR0);
67	GEN_CHECK_OFF(CPUMHOSTCTX, pXStateRC);
66 68	#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
67 69	GEN_CHECK_OFF(CPUMHOSTCTX, rbx);
…
132 134
133 135	GEN_CHECK_SIZE(CPUMCTX);
134	GEN_CHECK_OFF(CPUMCTX, XState);
136	GEN_CHECK_OFF(CPUMCTX, pXStateR0);
137	GEN_CHECK_OFF(CPUMCTX, pXStateR3);
138	GEN_CHECK_OFF(CPUMCTX, pXStateRC);
135 139	GEN_CHECK_OFF(CPUMCTX, rdi);
136 140	GEN_CHECK_OFF(CPUMCTX, rsi);
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r54898 r55048
298 298
299 299	/* cpumctx */
300	CHECK_MEMBER_ALIGNMENT(CPUMCTX, XState, 64);
301 300	CHECK_MEMBER_ALIGNMENT(CPUMCTX, rax, 32);
302 301	CHECK_MEMBER_ALIGNMENT(CPUMCTX, idtr.pIdt, 8);
trunk/src/recompiler/VBoxRecompiler.c
r55029 r55048
2358 2358	/* Sync FPU state after CR4, CPUID and EFER (!). */
2359 2359	if (fFlags & CPUM_CHANGED_FPU_REM)
2360	save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->XState.x87); /* 'save' is an excellent name. */
2360	save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87); /* 'save' is an excellent name. */
2361 2361	}
2362 2362
…
2551 2551
2552 2552	/** @todo check if FPU/XMM was actually used in the recompiler */
2553	restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->XState.x87);
2553	restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87);
2554 2554	//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2555 2555
…
2816 2816	 */
2818 2819	PX86FXSTATE pFpuCtx = &pCtx->pXStateR3->x87;
2818 2820	/** @todo FOP */
2819 2821	/** @todo FPUIP */
…
2822 2823	/** @todo DS */
2823 2824	/** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2824	pCtx->XState.x87.MXCSR = 0;
2825	pCtx->XState.x87.MXCSR_MASK = 0;
2825 2826	pFpuCtx->MXCSR = 0;
2826 2827	pFpuCtx->MXCSR_MASK = 0;
2827 2828
2828 2829	/** @todo check if FPU/XMM was actually used in the recompiler */
2829	restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->XState.x87);
2830	restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)pFpuCtx);
2830 2831	//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
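Reviewer note: the recompiler hunks repeatedly dereference pCtx->pXStateR3->x87, so the larger function hoists it into a pFpuCtx local once, the same pattern the IEM and CPUM changes in this changeset use. A trivial sketch of the hoisting, with simplified types:

    typedef struct X86FXSTATE_ { unsigned MXCSR, MXCSR_MASK; } X86FXSTATE_;
    typedef struct XSave       { X86FXSTATE_ x87; } XSave;
    typedef struct Ctx         { XSave *pXStateR3; } Ctx;

    static void SyncFpu(Ctx *pCtx)
    {
        X86FXSTATE_ *pFpuCtx = &pCtx->pXStateR3->x87;  /* one dereference, many uses */
        pFpuCtx->MXCSR      = 0;
        pFpuCtx->MXCSR_MASK = 0;
    }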