Changeset 81637 in vbox for trunk/src/VBox/VMM/include
- Timestamp: Nov 4, 2019 4:22:10 AM
- Location: trunk/src/VBox/VMM/include
- Files: 2 edited
Legend:
- Unmodified: lines without a prefix
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
trunk/src/VBox/VMM/include/HMInternal.h
(differences between r81578 and r81637)

     typedef struct HM
     {
+        /** Set if nested paging is enabled. */
+        bool fNestedPaging;
         /** Set when we've initialized VMX or SVM. */
         bool fInitialized;
-        /** Set if nested paging is enabled. */
-        bool fNestedPaging;
         /** Set if nested paging is allowed. */
         bool fAllowNestedPaging;
…
         uint8_t cPreemptTimerShift;

-        /** Virtual address of the TSS page used for real mode emulation. */
-        R3PTRTYPE(PVBOXTSS) pRealModeTSS;
-        /** Virtual address of the identity page table used for real mode and protected
-         * mode without paging emulation in EPT mode. */
-        R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;
-
-        /** Physical address of the APIC-access page. */
-        RTHCPHYS HCPhysApicAccess;
-        /** R0 memory object for the APIC-access page. */
-        RTR0MEMOBJ hMemObjApicAccess;
         /** Virtual address of the APIC-access page. */
         R0PTRTYPE(uint8_t *) pbApicAccess;
-
-        /** Physical address of the VMREAD bitmap. */
-        RTHCPHYS HCPhysVmreadBitmap;
-        /** Ring-0 memory object for the VMREAD bitmap. */
-        RTR0MEMOBJ hMemObjVmreadBitmap;
         /** Pointer to the VMREAD bitmap. */
         R0PTRTYPE(void *) pvVmreadBitmap;
-
-        /** Physical address of the VMWRITE bitmap. */
-        RTHCPHYS HCPhysVmwriteBitmap;
-        /** Ring-0 memory object for the VMWRITE bitmap. */
-        RTR0MEMOBJ hMemObjVmwriteBitmap;
         /** Pointer to the VMWRITE bitmap. */
         R0PTRTYPE(void *) pvVmwriteBitmap;

-#ifdef VBOX_WITH_CRASHDUMP_MAGIC
-        /** Physical address of the crash-dump scratch area. */
-        RTHCPHYS HCPhysScratch;
-        /** Ring-0 memory object for the crash-dump scratch area. */
-        RTR0MEMOBJ hMemObjScratch;
-        /** Pointer to the crash-dump scratch bitmap. */
-        R0PTRTYPE(uint8_t *) pbScratch;
-#endif
+        /** Pointer to the shadow VMCS read-only fields array. */
+        R0PTRTYPE(uint32_t *) paShadowVmcsRoFields;
+        /** Pointer to the shadow VMCS read/write fields array. */
+        R0PTRTYPE(uint32_t *) paShadowVmcsFields;
+        /** Number of elements in the shadow VMCS read-only fields array. */
+        uint32_t cShadowVmcsRoFields;
+        /** Number of elements in the shadow VMCS read-write fields array. */
+        uint32_t cShadowVmcsFields;

         /** Tagged-TLB flush type. */
…
         /** Host-physical address for a failing VMXON instruction. */
         RTHCPHYS HCPhysVmxEnableError;
-
-        /** Pointer to the shadow VMCS read-only fields array. */
-        R0PTRTYPE(uint32_t *) paShadowVmcsRoFields;
-        /** Pointer to the shadow VMCS read/write fields array. */
-        R0PTRTYPE(uint32_t *) paShadowVmcsFields;
-        /** Number of elements in the shadow VMCS read-only fields array. */
-        uint32_t cShadowVmcsRoFields;
-        /** Number of elements in the shadow VMCS read-write fields array. */
-        uint32_t cShadowVmcsFields;
+        /** Host-physical address of the APIC-access page. */
+        RTHCPHYS HCPhysApicAccess;
+        /** Host-physical address of the VMREAD bitmap. */
+        RTHCPHYS HCPhysVmreadBitmap;
+        /** Host-physical address of the VMWRITE bitmap. */
+        RTHCPHYS HCPhysVmwriteBitmap;
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+        /** Host-physical address of the crash-dump scratch area. */
+        RTHCPHYS HCPhysScratch;
+#endif
+
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+        /** Pointer to the crash-dump scratch bitmap. */
+        R0PTRTYPE(uint8_t *) pbScratch;
+#endif
+        /** Virtual address of the TSS page used for real mode emulation. */
+        R3PTRTYPE(PVBOXTSS) pRealModeTSS;
+        /** Virtual address of the identity page table used for real mode and protected
+         * mode without paging emulation in EPT mode. */
+        R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;
+
+        /** Ring-0 memory object for per-VM VMX structures. */
+        RTR0MEMOBJ hMemObj;
     } vmx;
…
 /** Pointer to HM VM instance data. */
 typedef HM *PHM;
-
-
+AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
+AssertCompileMemberAlignment(HM, vmx, 8);
+AssertCompileMemberAlignment(HM, svm, 8);
+

…
  * guest (or nested-guest) VMCS (VM control structure) using hardware-assisted VMX.
  *
- * The members here are ordered and aligned based on estimated frequency of usage
- * and grouped to fit within a cache line in hot code paths.
+ * Note! The members here are ordered and aligned based on estimated frequency of
+ *       usage and grouped to fit within a cache line in hot code paths. Even subtle
+ *       changes here have a noticeable effect in the bootsector benchmarks. Modify with
+ *       care.
  */
 typedef struct VMXVMCSINFO
…
     /** @} */

-    /** @name R0-memory objects address of VMCS and related data structures.
+    /** @name R0-memory objects address for VMCS and related data structures.
      * @{ */
-    /** The VMCS. */
-    RTR0MEMOBJ hMemObjVmcs;
-    /** R0 memory object for the shadow VMCS. */
-    RTR0MEMOBJ hMemObjShadowVmcs;
-    /** R0 memory object for the MSR bitmap. */
-    RTR0MEMOBJ hMemObjMsrBitmap;
-    /** R0 memory object of the VM-entry MSR-load area. */
-    RTR0MEMOBJ hMemObjGuestMsrLoad;
-    /** R0 memory object of the VM-exit MSR-store area. */
-    RTR0MEMOBJ hMemObjGuestMsrStore;
-    /** R0 memory object for the VM-exit MSR-load area. */
-    RTR0MEMOBJ hMemObjHostMsrLoad;
+    /** R0-memory object for VMCS and related data structures. */
+    RTR0MEMOBJ hMemObj;
     /** @} */
…
 typedef const VMXVMCSINFO *PCVMXVMCSINFO;
 AssertCompileSizeAlignment(VMXVMCSINFO, 8);
 AssertCompileMemberAlignment(VMXVMCSINFO, pfnStartVM, 8);
 AssertCompileMemberAlignment(VMXVMCSINFO, u32PinCtls, 4);
 AssertCompileMemberAlignment(VMXVMCSINFO, u64VmcsLinkPtr, 8);
 AssertCompileMemberAlignment(VMXVMCSINFO, pvVmcs, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvShadowVmcs, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pbVirtApic, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvMsrBitmap, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrLoad, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrStore, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, pvHostMsrLoad, 8);
 AssertCompileMemberAlignment(VMXVMCSINFO, HCPhysVmcs, 8);
-AssertCompileMemberAlignment(VMXVMCSINFO, hMemObjVmcs, 8);
+AssertCompileMemberAlignment(VMXVMCSINFO, hMemObj, 8);
…
  * Note! If you change members of this struct, make sure to check if the
  *       assembly counterpart in HMInternal.mac needs to be updated as well.
+ *
+ * Note! The members here are ordered and aligned based on estimated frequency of
+ *       usage and grouped to fit within a cache line in hot code paths. Even subtle
+ *       changes here have a noticeable effect in the bootsector benchmarks. Modify with
+ *       care.
  */
 typedef struct HMCPU
…
     /** Set when the TLB has been checked until we return from the world switch. */
     bool volatile fCheckedTLBFlush;
-    /** Set if we need to flush the TLB during the world switch. */
-    bool fForceTLBFlush;
     /** Set when we're using VT-x or AMD-V at that moment. */
     bool fActive;
…
     /** Whether we're using the hyper DR7 or guest DR7. */
     bool fUsingHyperDR7;
-    /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
-     * execution. */
-    bool fLoadSaveGuestXcr0;

+    /** Set if we need to flush the TLB during the world switch. */
+    bool fForceTLBFlush;
     /** Whether we should use the debug loop because of single stepping or special
      * debug breakpoints / events are armed. */
…
     /** Set if we using the debug loop and wish to intercept RDTSC. */
     bool fDebugWantRdTscExit;
-    /** Whether we're executing a single instruction. */
-    bool fSingleInstruction;
-    /** Set if we need to clear the trap flag because of single stepping. */
-    bool fClearTrapFlag;

+    /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
+     * execution. */
+    bool fLoadSaveGuestXcr0;
     /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
     bool fGIMTrapXcptUD;
     /** Whether \#GP needs to be intercept for mesa driver workaround. */
     bool fTrapXcptGpForLovelyMesaDrv;
-    uint8_t u8Alignment0[3];
+    /** Whether we're executing a single instruction. */
+    bool fSingleInstruction;
+
+    /** Set if we need to clear the trap flag because of single stepping. */
+    bool fClearTrapFlag;
+    bool afAlignment0[3];

     /** World switch exit counter. */
…
     HMEVENT Event;

+    /** The CPU ID of the CPU currently owning the VMCS. Set in
+     * HMR0Enter and cleared in HMR0Leave. */
+    RTCPUID idEnteredCpu;
+
+    /** Current shadow paging mode for updating CR4. */
+    PGMMODE enmShadowMode;
+
     /** The PAE PDPEs used with Nested Paging (only valid when
      * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
     X86PDPE aPdpes[4];
-
-    /** Current shadow paging mode for updating CR4. */
-    PGMMODE enmShadowMode;
-
-    /** The CPU ID of the CPU currently owning the VMCS. Set in
-     * HMR0Enter and cleared in HMR0Leave. */
-    RTCPUID idEnteredCpu;

     /** For saving stack space, the disassembler state is allocated here instead of
…
 /** Pointer to HM VMCPU instance data. */
 typedef HMCPU *PHMCPU;
+AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush, 4);
+AssertCompileMemberAlignment(HMCPU, fForceTLBFlush, 4);
 AssertCompileMemberAlignment(HMCPU, cWorldSwitchExits, 4);
 AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);
 AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx, 8);
+AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfo, 8);
+AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfoNstGst, 8);
+AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.RestoreHost, 8);
 AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) svm, 8);
 AssertCompileMemberAlignment(HMCPU, Event, 8);
trunk/src/VBox/VMM/include/HMInternal.mac
(differences between r80150 and r81637)

 struc HMCPU
-    .fCheckedTLBFlush resb 1
-    .fForceTLBFlush resb 1
-    .fActive resb 1
-    .fLeaveDone resb 1
-    .fUsingHyperDR7 resb 1
-    .fLoadSaveGuestXcr0 resb 1
+    .fCheckedTLBFlush resb 1
+    .fActive resb 1
+    .fLeaveDone resb 1
+    .fUsingHyperDR7 resb 1
+    .fForceTLBFlush resb 1
+    .fUseDebugLoop resb 1
+    .fUsingDebugLoop resb 1
+    .fDebugWantRdTscExit resb 1

-    .fUseDebugLoop resb 1
-    .fUsingDebugLoop resb 1
-    .fDebugWantRdTscExit resb 1
-    .fSingleInstruction resb 1
-    .fClearTrapFlag resb 1
-
-    .fGIMTrapXcptUD resb 1
-    .fTrapXcptGpForLovelyMesaDrv resb 1
+    .fLoadSaveGuestXcr0 resb 1
+    .fGIMTrapXcptUD resb 1
+    .fTrapXcptGpForLovelyMesaDrv resb 1
+    .fSingleInstruction resb 1
+    .fClearTrapFlag resb 1
     alignb 8

-    .cWorldSwitchExits resd 1
-    .idLastCpu resd 1
-    .cTlbFlushes resd 1
-    .uCurrentAsid resd 1
-    .u32HMError resd 1
-    alignb 8
-    .fCtxChanged resq 1
+    .cWorldSwitchExits resd 1
+    .idLastCpu resd 1
+    .cTlbFlushes resd 1
+    .uCurrentAsid resd 1
+    .u32HMError resd 1
+    .rcLastExitToR3 resd 1
+    .fCtxChanged resq 1

     ; incomplete to save unnecessary pain...
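The HMCPU booleans are reordered identically in both files because the NASM struc in HMInternal.mac mirrors the C structure byte for byte, and the AssertCompileMemberAlignment lines added in HMInternal.h make the compiler catch layout drift at build time. As a hedged illustration of how such IPRT compile-time checks behave (not VirtualBox code; the EXAMPLECPU structure and its offsets are invented), consider:

/* Minimal sketch, assuming an invented EXAMPLECPU layout: compile-time
 * assertions of the kind added by this changeset fail the build if the C
 * structure no longer matches what the hand-written assembly expects. */
#include <iprt/assert.h>
#include <iprt/types.h>

typedef struct EXAMPLECPU               /* hypothetical, for illustration */
{
    bool volatile   fCheckedTLBFlush;   /* offset 0 */
    bool            fActive;            /* offset 1 */
    bool            afPadding[6];       /* offsets 2..7 */
    uint32_t        cWorldSwitchExits;  /* offset 8 */
    uint32_t        idLastCpu;          /* offset 12 */
    uint64_t        fCtxChanged;        /* offset 16 */
} EXAMPLECPU;

/* Fails to compile if a member is no longer aligned as the assembly expects. */
AssertCompileMemberAlignment(EXAMPLECPU, cWorldSwitchExits, 4);
AssertCompileMemberAlignment(EXAMPLECPU, fCtxChanged, 8);
/* Fails to compile if a member moves, i.e. if the .mac struc would go stale. */
AssertCompileMemberOffset(EXAMPLECPU, cWorldSwitchExits, 8);

Since the assertions cost nothing at runtime, adding one per member that the assembly side relies on is a cheap way to keep the two definitions in sync.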