- Timestamp: Jul 4, 2016 9:46:23 PM
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
(r61968 → r62016)

     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
-        pVCpu->iem.s.offVM    = -RT_OFFSETOF(VM, aCpus[idCpu].iem.s);
-        pVCpu->iem.s.offVMCpu = -RT_OFFSETOF(VMCPU, iem.s);
-        pVCpu->iem.s.pCtxR3   = CPUMQueryGuestCtxPtr(pVCpu);
-        pVCpu->iem.s.pCtxR0   = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
-        pVCpu->iem.s.pCtxRC   = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);
+        pVCpu->iem.s.pCtxR3 = CPUMQueryGuestCtxPtr(pVCpu);
+        pVCpu->iem.s.pCtxR0 = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
+        pVCpu->iem.s.pCtxRC = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);

         STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
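The assignments dropped here were the only initialisers of IEMCPU::offVM and IEMCPU::offVMCpu, two negative self-offsets that let IEM code recover the owning VM/VMCPU from a bare IEMCPU pointer. A minimal standalone sketch of that container-of idiom, using hypothetical DEMO* names rather than the real VMM types:

    /* Container-of via a stored negative offset: the sub-state remembers how
     * far it sits inside its container, so helpers can walk back up to it.
     * DEMO* types and names are illustrative, not the real VMM headers. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct DEMOIEMCPU
    {
        int32_t offCpu;     /* negative offset back to the container */
        int32_t rcPassUp;
    } DEMOIEMCPU;

    typedef struct DEMOVMCPU
    {
        uint32_t    idCpu;
        DEMOIEMCPU  iem;
    } DEMOVMCPU;

    /* The retired IEMCPU_TO_VMCPU idiom: add the stored (negative) offset. */
    #define DEMO_IEMCPU_TO_VMCPU(a_pIemCpu) \
        ((DEMOVMCPU *)((uintptr_t)(a_pIemCpu) + (a_pIemCpu)->offCpu))

    int main(void)
    {
        DEMOVMCPU VCpu = { 42, { 0, 0 } };
        /* As the deleted code did with -RT_OFFSETOF(...): */
        VCpu.iem.offCpu = -(int32_t)offsetof(DEMOVMCPU, iem);

        DEMOIEMCPU *pIemCpu = &VCpu.iem;
        assert(DEMO_IEMCPU_TO_VMCPU(pIemCpu) == &VCpu);
        printf("recovered idCpu=%u\n", DEMO_IEMCPU_TO_VMCPU(pIemCpu)->idCpu);
        return 0;
    }

With the offsets gone, the IEMCPU_TO_VMCPU/IEMCPU_TO_VM macros built on them (deleted from IEMInternal.h below) have nothing to read; callers presumably receive the VMCPU pointer directly instead.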
trunk/src/VBox/VMM/include/IEMInternal.h
(r62015 → r62016)

 /**
  * The per-CPU IEM state.
- *
- * @todo Re-org so most frequently accessed members are in the first 64 bytes.
  */
 typedef struct IEMCPU
…
     /** Pointer to the CPU context - ring-3 context. */
     R3PTRTYPE(PCPUMCTX)     pCtxR3;
-    /** Pointer set jump buffer - ring-3 context. */
-    R3PTRTYPE(jmp_buf *)    pJmpBufR3;
     /** Pointer to the CPU context - ring-0 context. */
     R0PTRTYPE(PCPUMCTX)     pCtxR0;
-    /** Pointer set jump buffer - ring-0 context. */
-    R0PTRTYPE(jmp_buf *)    pJmpBufR0;
     /** Pointer to the CPU context - raw-mode context. */
     RCPTRTYPE(PCPUMCTX)     pCtxRC;
-    /** Pointer set jump buffer - raw-mode context. */
-    RCPTRTYPE(jmp_buf *)    pJmpBufRC;

-    /** Offset of the VMCPU structure relative to this structure (negative). */
-    int32_t                 offVMCpu;
-    /** Offset of the VM structure relative to this structure (negative). */
-    int32_t                 offVM;
+    /** Info status code that needs to be propagated to the IEM caller.
+     * This cannot be passed internally, as it would complicate all success
+     * checks within the interpreter making the code larger and almost impossible
+     * to get right.  Instead, we'll store status codes to pass on here.  Each
+     * source of these codes will perform appropriate sanity checks. */
+    int32_t                 rcPassUp;
+
+    /** The current CPU execution mode (CS). */
+    IEMMODE                 enmCpuMode;
+    /** The CPL. */
+    uint8_t                 uCpl;

     /** Whether to bypass access handlers or not. */
…
     /** Indicates that we're interpreting patch code - RC only! */
     bool                    fInPatchCode;
+
+    /** @name Decoder state.
+     * @{ */
+    /** The current offset into abOpcodes. */
+    uint8_t                 offOpcode;
+    /** The size of what has currently been fetched into abOpcodes. */
+    uint8_t                 cbOpcode;
+
+    /** The effective segment register (X86_SREG_XXX). */
+    uint8_t                 iEffSeg;
+
+    /** The extra REX ModR/M register field bit (REX.R << 3). */
+    uint8_t                 uRexReg;
+    /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
+     * (REX.B << 3). */
+    uint8_t                 uRexB;
+    /** The prefix mask (IEM_OP_PRF_XXX). */
+    uint32_t                fPrefixes;
+    /** The extra REX SIB index field bit (REX.X << 3). */
+    uint8_t                 uRexIndex;
+
+    /** Offset into abOpcodes where the FPU instruction starts.
+     * Only set by the FPU escape opcodes (0xd8-0xdf) and used later on when the
+     * instruction result is committed. */
+    uint8_t                 offFpuOpcode;
+
     /** Explicit alignment padding. */
-    bool                    afAlignment0[2+4];
+    uint8_t                 abAlignment1[2];
+
+    /** The effective operand mode. */
+    IEMMODE                 enmEffOpSize;
+    /** The default addressing mode. */
+    IEMMODE                 enmDefAddrMode;
+    /** The effective addressing mode. */
+    IEMMODE                 enmEffAddrMode;
+    /** The default operand mode. */
+    IEMMODE                 enmDefOpSize;
+
+    /** The opcode bytes. */
+    uint8_t                 abOpcode[15];
+    /** Explicit alignment padding. */
+    uint8_t                 abAlignment2[HC_ARCH_BITS == 64 ? 5 : 1];
+    /** @} */

     /** The flags of the current exception / interrupt. */
…
     /** Exception / interrupt recursion depth. */
     int8_t                  cXcptRecursions;
-    /** Explicit alignment padding. */
-    bool                    afAlignment1[1];
-    /** The CPL. */
-    uint8_t                 uCpl;
-    /** The current CPU execution mode (CS). */
-    IEMMODE                 enmCpuMode;
-    /** Info status code that needs to be propagated to the IEM caller.
-     * This cannot be passed internally, as it would complicate all success
-     * checks within the interpreter making the code larger and almost impossible
-     * to get right.  Instead, we'll store status codes to pass on here.  Each
-     * source of these codes will perform appropriate sanity checks. */
-    int32_t                 rcPassUp;

+    /** The number of active guest memory mappings. */
+    uint8_t                 cActiveMappings;
+    /** The next unused mapping index. */
+    uint8_t                 iNextMapping;
+    /** Records for tracking guest memory mappings. */
+    struct
+    {
+        /** The address of the mapped bytes. */
+        void               *pv;
+#if defined(IN_RC) && HC_ARCH_BITS == 64
+        uint32_t            u32Alignment3; /**< Alignment padding. */
+#endif
+        /** The access flags (IEM_ACCESS_XXX).
+         * IEM_ACCESS_INVALID if the entry is unused. */
+        uint32_t            fAccess;
+#if HC_ARCH_BITS == 64
+        uint32_t            u32Alignment4; /**< Alignment padding. */
+#endif
+    } aMemMappings[3];
+
+    /** Locking records for the mapped memory. */
+    union
+    {
+        PGMPAGEMAPLOCK      Lock;
+        uint64_t            au64Padding[2];
+    } aMemMappingLocks[3];
+
+    /** Bounce buffer info.
+     * This runs in parallel to aMemMappings. */
+    struct
+    {
+        /** The physical address of the first byte. */
+        RTGCPHYS            GCPhysFirst;
+        /** The physical address of the second page. */
+        RTGCPHYS            GCPhysSecond;
+        /** The number of bytes in the first page. */
+        uint16_t            cbFirst;
+        /** The number of bytes in the second page. */
+        uint16_t            cbSecond;
+        /** Whether it's unassigned memory. */
+        bool                fUnassigned;
+        /** Explicit alignment padding. */
+        bool                afAlignment5[3];
+    } aMemBbMappings[3];
+
+    /** Bounce buffer storage.
+     * This runs in parallel to aMemMappings and aMemBbMappings. */
+    struct
+    {
+        uint8_t             ab[512];
+    } aBounceBuffers[3];
+
+
+    /** Pointer set jump buffer - ring-3 context. */
+    R3PTRTYPE(jmp_buf *)    pJmpBufR3;
+    /** Pointer set jump buffer - ring-0 context. */
+    R0PTRTYPE(jmp_buf *)    pJmpBufR0;
+    /** Pointer set jump buffer - raw-mode context. */
+    RCPTRTYPE(jmp_buf *)    pJmpBufRC;
+

     /** @name Statistics
…
     RTGCPHYS                GCPhysOpcodes;
 #endif
-    /** @} */
-
-    /** @name Decoder state.
-     * @{ */
-
-    /** The default addressing mode. */
-    IEMMODE                 enmDefAddrMode;
-    /** The effective addressing mode. */
-    IEMMODE                 enmEffAddrMode;
-    /** The default operand mode. */
-    IEMMODE                 enmDefOpSize;
-    /** The effective operand mode. */
-    IEMMODE                 enmEffOpSize;
-
-    /** The prefix mask (IEM_OP_PRF_XXX). */
-    uint32_t                fPrefixes;
-    /** The extra REX ModR/M register field bit (REX.R << 3). */
-    uint8_t                 uRexReg;
-    /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
-     * (REX.B << 3). */
-    uint8_t                 uRexB;
-    /** The extra REX SIB index field bit (REX.X << 3). */
-    uint8_t                 uRexIndex;
-    /** The effective segment register (X86_SREG_XXX). */
-    uint8_t                 iEffSeg;
-
-    /** The current offset into abOpcodes. */
-    uint8_t                 offOpcode;
-    /** The size of what has currently been fetched into abOpcodes. */
-    uint8_t                 cbOpcode;
-    /** The opcode bytes. */
-    uint8_t                 abOpcode[15];
-    /** Offset into abOpcodes where the FPU instruction starts.
-     * Only set by the FPU escape opcodes (0xd8-0xdf) and used later on when the
-     * instruction result is committed. */
-    uint8_t                 offFpuOpcode;
-
     /** @} */
-
-    /** The number of active guest memory mappings. */
-    uint8_t                 cActiveMappings;
-    /** The next unused mapping index. */
-    uint8_t                 iNextMapping;
-    /** Records for tracking guest memory mappings. */
-    struct
-    {
-        /** The address of the mapped bytes. */
-        void               *pv;
-#if defined(IN_RC) && HC_ARCH_BITS == 64
-        uint32_t            u32Alignment3; /**< Alignment padding. */
-#endif
-        /** The access flags (IEM_ACCESS_XXX).
-         * IEM_ACCESS_INVALID if the entry is unused. */
-        uint32_t            fAccess;
-#if HC_ARCH_BITS == 64
-        uint32_t            u32Alignment4; /**< Alignment padding. */
-#endif
-    } aMemMappings[3];
-
-    /** Locking records for the mapped memory. */
-    union
-    {
-        PGMPAGEMAPLOCK      Lock;
-        uint64_t            au64Padding[2];
-    } aMemMappingLocks[3];
-
-    /** Bounce buffer info.
-     * This runs in parallel to aMemMappings. */
-    struct
-    {
-        /** The physical address of the first byte. */
-        RTGCPHYS            GCPhysFirst;
-        /** The physical address of the second page. */
-        RTGCPHYS            GCPhysSecond;
-        /** The number of bytes in the first page. */
-        uint16_t            cbFirst;
-        /** The number of bytes in the second page. */
-        uint16_t            cbSecond;
-        /** Whether it's unassigned memory. */
-        bool                fUnassigned;
-        /** Explicit alignment padding. */
-        bool                afAlignment5[3];
-    } aMemBbMappings[3];
-
-    /** Bounce buffer storage.
-     * This runs in parallel to aMemMappings and aMemBbMappings. */
-    struct
-    {
-        uint8_t             ab[512];
-    } aBounceBuffers[3];

     /** @name Target CPU information.
…
     /** @} */

-    uint32_t au32Alignment6[HC_ARCH_BITS == 64 ? 1 + 10 : 1 + 4]; /**< Alignment padding. */
+    uint32_t au32Alignment6[HC_ARCH_BITS == 64 ? 1 + 4 + 8 : 1 + 2 + 4]; /**< Alignment padding. */

     /** Data TLB.
…
 typedef IEMCPU const *PCIEMCPU;

-/** Converts a IEMCPU pointer to a VMCPU pointer.
- * @returns VMCPU pointer.
- * @param   a_pIemCpu   The IEM per CPU instance data.
- */
-#define IEMCPU_TO_VMCPU(a_pIemCpu) ((PVMCPU)( (uintptr_t)(a_pIemCpu) + a_pIemCpu->offVMCpu ))
-
-/** Converts a IEMCPU pointer to a VM pointer.
- * @returns VM pointer.
- * @param   a_pIemCpu   The IEM per CPU instance data.
- */
-#define IEMCPU_TO_VM(a_pIemCpu) ((PVM)( (uintptr_t)(a_pIemCpu) + a_pIemCpu->offVM ))

 /** Gets the current IEMTARGETCPU value.
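The @todo removed at the top ("Re-org so most frequently accessed members are in the first 64 bytes") is what this reshuffle delivers: rcPassUp, the execution mode, the CPL and the whole decoder state move to the front of IEMCPU, while the setjmp buffers, mapping records and bounce buffers sink towards the back. Layout work like this is easiest to keep honest with a compile-time check; a standalone sketch on a toy structure (not the real IEMCPU, and VirtualBox would use IPRT's AssertCompile* macros rather than this portable negative-array trick):

    /* Pin the hot part of a structure to the first cache line at compile
     * time.  The 64-byte line size and all DEMO* names are assumptions. */
    #include <stddef.h>
    #include <stdint.h>

    typedef enum DEMOMODE { DEMOMODE_16BIT, DEMOMODE_32BIT, DEMOMODE_64BIT } DEMOMODE;

    typedef struct DEMODECODER
    {
        void       *pCtx;          /* touched on every instruction */
        int32_t     rcPassUp;      /* deferred status code */
        DEMOMODE    enmCpuMode;    /* current execution mode */
        uint8_t     uCpl;          /* current privilege level */
        uint8_t     offOpcode;     /* decoder cursor */
        uint8_t     cbOpcode;      /* bytes fetched so far */
        uint8_t     abOpcode[15];  /* opcode bytes */
        /* ... cold members (statistics, bounce buffers) follow ... */
        uint8_t     abBounce[512];
    } DEMODECODER;

    /* Build fails (negative array size) if the hot members ever spill
     * past the first 64 bytes. */
    typedef char demo_hot_members_fit_one_line[
        (offsetof(DEMODECODER, abOpcode) + sizeof(((DEMODECODER *)0)->abOpcode) <= 64)
        ? 1 : -1];

    int main(void) { return 0; }

The same concern explains the fiddly abAlignment1/abAlignment2/au32Alignment6 padding members in the diff: they keep the following fields at predictable offsets on both 32-bit and 64-bit hosts.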
trunk/src/VBox/VMM/testcase/tstVMStruct.h
(r62006 → r62016)

     GEN_CHECK_OFF(IEMCPU, pCtxR3);
     GEN_CHECK_OFF(IEMCPU, pCtxRC);
-    GEN_CHECK_OFF(IEMCPU, offVM);
-    GEN_CHECK_OFF(IEMCPU, offVMCpu);
     GEN_CHECK_OFF(IEMCPU, enmCpuMode);
     GEN_CHECK_OFF(IEMCPU, fPrefixes);
…
     GEN_CHECK_OFF(IEMCPU, aMemBbMappings);
     GEN_CHECK_OFF(IEMCPU, aMemBbMappings[1]);
+    GEN_CHECK_OFF(IEMCPU, DataTlb);
+    GEN_CHECK_OFF(IEMCPU, CodeTlb);

     GEN_CHECK_SIZE(IOM);
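tstVMStruct.h feeds a testcase that cross-checks member offsets and structure sizes across the compilers and contexts sharing these structures, so its entries must track the structure: the offVM/offVMCpu lines go, and the DataTlb/CodeTlb members gain coverage. A simplified stand-in for the idea behind GEN_CHECK_OFF / GEN_CHECK_SIZE (the real macro definitions differ; this merely emits reference values that one compiler generates and another build can verify against):

    /* Emit structure offsets/sizes as #defines; a second build with a
     * different compiler can then compare its own offsetof/sizeof results
     * against these constants.  DEMOSTATE is an illustrative type. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct DEMOSTATE
    {
        void    *pCtxR3;
        int32_t  rcPassUp;
        uint8_t  uCpl;
    } DEMOSTATE;

    #define GEN_CHECK_OFF(type, member) \
        printf("#define CHK_OFF_" #type "_" #member " %u\n", \
               (unsigned)offsetof(type, member))
    #define GEN_CHECK_SIZE(type) \
        printf("#define CHK_SIZE_" #type " %u\n", (unsigned)sizeof(type))

    int main(void)
    {
        GEN_CHECK_OFF(DEMOSTATE, pCtxR3);
        GEN_CHECK_OFF(DEMOSTATE, rcPassUp);
        GEN_CHECK_OFF(DEMOSTATE, uCpl);
        GEN_CHECK_SIZE(DEMOSTATE);
        return 0;
    }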