- Timestamp: Apr 12, 2013 1:17:01 AM
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
Diff from r45502 to r45503:

@@ -1236,5 +1236,5 @@
     if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
     {
-        for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
+        for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
             hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
     }

@@ -1322,5 +1322,5 @@  (whitespace-only change; not visible in this extraction)
     }

-    pVCpu->hm.s.TlbShootdown.cPages = 0;
+    pVCpu->hm.s.TlbShootdown.cPages = 0;
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
 }

@@ -1402,5 +1402,5 @@
     if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
     {
-        for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
+        for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
             hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
     }

@@ -4652,4 +4652,4 @@
     pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
     pVCpu->hm.s.Event.u32ErrCode  = u32ErrCode;
-    pVCpu->hm.s.Event.u32InstrLen = cbInstr;
+    pVCpu->hm.s.Event.cbInstr     = cbInstr;
 }

@@ -5816,5 +5816,5 @@
 {
     Log(("Injecting pending event\n"));
-    int rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32InstrLen,
+    int rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
                                     pVCpu->hm.s.Event.u32ErrCode);
     AssertRCReturn(rc, rc);
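The hunks above switch the loop indices from unsigned to uint32_t so they match the width of the TlbShootdown.cPages counter (itself changed to uint32_t in HMInternal.h below), and rename u32InstrLen to cbInstr to follow the byte-count naming convention. A minimal standalone sketch of the shootdown-replay pattern, with hypothetical names standing in for the VirtualBox functions and structures:

    #include <stdint.h>

    #define MAX_SHOOTDOWN_PAGES 8   /* hypothetical limit, standing in for HM_MAX_TLB_SHOOTDOWN_PAGES */

    /* Hypothetical queue of guest addresses whose TLB entries still need flushing. */
    typedef struct SHOOTDOWNQUEUE
    {
        uint64_t aPages[MAX_SHOOTDOWN_PAGES];
        uint32_t cPages;                    /* same width as the loop index below */
    } SHOOTDOWNQUEUE;

    /* Hypothetical per-address flush, standing in for hmR0VmxFlushVpid() / SVMR0InvlpgA(). */
    static void flushOnePage(uint64_t GCPtrPage)
    {
        (void)GCPtrPage;                    /* real code would issue INVVPID/INVLPGA here */
    }

    /* Replay the queued shootdowns; a uint32_t index matches cPages exactly,
       so the comparison involves no implicit widening or sign conversion. */
    static void replayShootdownQueue(SHOOTDOWNQUEUE *pQueue)
    {
        for (uint32_t i = 0; i < pQueue->cPages; i++)
            flushOnePage(pQueue->aPages[i]);
        pQueue->cPages = 0;                 /* queue consumed */
    }

    int main(void)
    {
        SHOOTDOWNQUEUE Queue = { {0}, 0 };
        Queue.aPages[Queue.cPages++] = UINT64_C(0x00007fff00001000);  /* arbitrary example address */
        replayShootdownQueue(&Queue);
        return 0;
    }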
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
Diff from r45387 to r45503:

@@ -1181,5 +1181,5 @@
     /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
     STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
-    for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
+    for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
         SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pvVMCB->ctrl.TLBCtrl.n.u32ASID);
 }
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
Diff from r45478 to r45503:

@@ -2610,4 +2610,3 @@
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
     pVCpu->hm.s.TlbShootdown.cPages = 0;
-    return;

@@ -2697,5 +2696,5 @@
     if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
     {
-        for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
+        for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
             hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
     }

@@ -2775,5 +2774,5 @@  (whitespace-only change; not visible in this extraction)
         }
     }
-    pVCpu->hm.s.TlbShootdown.cPages = 0;
+    pVCpu->hm.s.TlbShootdown.cPages = 0;
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

@@ -2860,5 +2859,5 @@
     if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
     {
-        for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
+        for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
             hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
     }
trunk/src/VBox/VMM/include/HMInternal.h
Diff from r45502 to r45503:

@@ -582,5 +582,5 @@
     uint32_t                u32Alignment;

-    /* Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
+    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
     uint64_t                u64HostTscAux;

@@ -655,12 +655,12 @@
     R0PTRTYPE(void *)       pvHostMsr;

-    /* Number of automatically loaded/restored guest MSRs during the world switch. */
+    /** Number of automatically loaded/restored guest MSRs during the world switch. */
     uint32_t                cGuestMsrs;
     uint32_t                uAlignment;
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

-    /* The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
+    /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
     uint64_t                u64MsrApicBase;
-    /* Last use TSC offset value. (cached) */
+    /** Last use TSC offset value. (cached) */
     uint64_t                u64TSCOffset;
     /** VMCS cache. */

@@ -731,5 +731,6 @@
     uint32_t                fPending;
     uint32_t                u32ErrCode;
-    uint32_t                u32InstrLen;
+    uint32_t                cbInstr;
+    uint32_t                u32Padding;     /**< Explicit alignment padding. */
     uint64_t                u64IntrInfo;
 } Event;

@@ -749,6 +750,6 @@
 struct
 {
-    /* Pending IO operation type. */
-    HMPENDINGIO             enmType;
+    /** Pending IO operation type. */
+    HMPENDINGIO             enmType;
     uint32_t                uPadding;
     RTGCPTR                 GCPtrRip;

@@ -758,7 +759,7 @@
     struct
     {
-        unsigned            uPort;
-        unsigned            uAndVal;
-        unsigned            cbSize;
+        uint32_t            uPort;
+        uint32_t            uAndVal;
+        uint32_t            cbSize;
     } Port;
     uint64_t                aRaw[2];

@@ -781,5 +782,6 @@
 {
     RTGCPTR                 aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
-    unsigned                cPages;
+    uint32_t                cPages;
+    uint32_t                u32Padding;     /**< Explicit alignment padding. */
 } TlbShootdown;

@@ -788,10 +790,8 @@
     DISCPUSTATE             DisState;

-    uint32_t                padding2[1];
-
     STAMPROFILEADV          StatEntry;
     STAMPROFILEADV          StatExit1;
     STAMPROFILEADV          StatExit2;
-#ifdef VBOX_WITH_OLD_VTX_CODE /* temporary for tracking down darwin issues. */
+#ifdef VBOX_WITH_OLD_VTX_CODE /* "temporary" for tracking down darwin issues. */
     STAMPROFILEADV          StatExit2Sub1;
     STAMPROFILEADV          StatExit2Sub2;
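The header hunks rename u32InstrLen to cbInstr, replace unsigned members with fixed-width uint32_t, promote plain /* */ comments to Doxygen /** */ documentation comments, and add explicit u32Padding members so the uint64_t fields that follow start on an 8-byte boundary rather than relying on implicit compiler padding. A minimal sketch of why the explicit padding is there, using a hypothetical struct rather than the real HMCPU layout:

    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    /* Hypothetical event record with the same shape as the Event sub-struct:
       three 32-bit fields would place the following 64-bit field at offset 12,
       leaving it either misaligned or silently padded by the compiler; the
       explicit fourth 32-bit member documents the padding and pins the layout. */
    typedef struct EVENTRECORD
    {
        uint32_t fPending;
        uint32_t u32ErrCode;
        uint32_t cbInstr;       /* instruction length in bytes ("cb" = count of bytes) */
        uint32_t u32Padding;    /* explicit alignment padding */
        uint64_t u64IntrInfo;   /* now starts on an 8-byte boundary */
    } EVENTRECORD;

    /* C11/C++11 compile-time check of the layout assumption. */
    static_assert(offsetof(EVENTRECORD, u64IntrInfo) % 8 == 0,
                  "u64IntrInfo must be 8-byte aligned");

    int main(void)
    {
        /* Nothing to do at run time; the static_assert above is the point. */
        return 0;
    }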
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
Diff from r45152 to r45503:

@@ -403,4 +403,5 @@
     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVmcs, sizeof(RTHCPHYS));
     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.u32PinCtls, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, DisState, 8);
     CHECK_MEMBER_ALIGNMENT(HMCPU, Event.u64IntrInfo, 8);
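The structure-size testcase gains a check that HMCPU's DisState member is 8-byte aligned, so the padding changes in HMInternal.h are verified rather than assumed. A rough sketch of what such an offsetof-based check can boil down to; the macro below is illustrative only, not the actual CHECK_MEMBER_ALIGNMENT from tstVMStructSize:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical structure standing in for HMCPU in this sketch. */
    typedef struct SAMPLECPU
    {
        uint32_t fFlags;
        uint32_t u32Padding;
        uint64_t u64IntrInfo;    /* expected on an 8-byte boundary */
    } SAMPLECPU;

    /* Illustrative check: report a member whose offset within the structure is
       not a multiple of the required alignment. */
    #define CHECK_MEMBER_ALIGNMENT_SKETCH(type, member, align) \
        do { \
            if (offsetof(type, member) % (align) != 0) \
                printf("misaligned: %s::%s at offset %u (need %u-byte alignment)\n", \
                       #type, #member, (unsigned)offsetof(type, member), (unsigned)(align)); \
        } while (0)

    int main(void)
    {
        CHECK_MEMBER_ALIGNMENT_SKETCH(SAMPLECPU, u64IntrInfo, 8);
        return 0;
    }

A check like this turns silent layout drift (for example, a future member pushing DisState off its 8-byte boundary) into an explicit test failure instead of a runtime surprise.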