Changeset 45091 in vbox
- Timestamp:
- Mar 19, 2013 4:01:32 PM (12 years ago)
- svn:sync-xref-src-repo-rev:
- 84380
- Location:
- trunk
- Files:
-
- 13 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Config.kmk
r45063 r45091 359 359 # TODO: Eliminate VBOX_WITH_PDM_LOCK. 360 360 VBOX_WITH_PDM_LOCK = 1 361 # Enable the use of the old VT-x code. 362 VBOX_WITH_OLD_VTX_CODE = 1 361 363 ## @} 362 364 -
trunk/include/VBox/err.h
r45086 r45091 1120 1120 /** Internal processing error \#1 in the switcher code. */ 1121 1121 #define VERR_VMM_SWITCHER_IPE_1 (-2713) 1122 /** Reason for leaving RZ: Unknown call to ring-3. */ 1123 #define VINF_VMM_UNKNOWN_RING3_CALL (2714) 1122 1124 /** @} */ 1123 1125 -
trunk/include/VBox/vmm/hm_vmx.h
r44791 r45091 1629 1629 #endif 1630 1630 1631 #if HC_ARCH_BITS == 64 1632 #define VMXWriteVmcs VMXWriteVmcs64 1633 #else 1634 #define VMXWriteVmcs VMXWriteVmcs32 1635 #endif /* HC_ARCH_BITS == 64 */ 1631 #ifdef VBOX_WITH_OLD_VTX_CODE 1632 # if HC_ARCH_BITS == 64 1633 # define VMXWriteVmcs VMXWriteVmcs64 1634 # else 1635 # define VMXWriteVmcs VMXWriteVmcs32 1636 # endif 1637 #else /* !VBOX_WITH_OLD_VTX_CODE */ 1638 # if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 1639 # define VMXWriteVmcsHstN VMXWriteVmcs64 1640 # else 1641 # define VMXWriteVmcsHstN VMXWriteVmcs32 1642 # endif 1643 # define VMXWriteVmcsGstN VMXWriteVmcs64 1644 #endif 1636 1645 1637 1646 … … 1731 1740 1732 1741 # if HC_ARCH_BITS == 64 1733 # define VMXReadVmcs VMXReadVmcs641742 # define VMXReadVmcsField VMXReadVmcs64 1734 1743 # else 1735 # define VMXReadVmcs VMXReadVmcs321744 # define VMXReadVmcsField VMXReadVmcs32 1736 1745 # endif 1737 1746 -
trunk/include/VBox/vmm/vmm.h
r44394 r45091 111 111 VMMCALLRING3_32BIT_HACK = 0x7fffffff 112 112 } VMMCALLRING3; 113 114 /** 115 * VMMRZCallRing3 notification callback. 116 * 117 * @param pVCpu Pointer to the VMCPU. 118 * @param enmOperation The operation causing the ring-3 jump. 119 * @param pvUser The user argument. 120 */ 121 typedef DECLCALLBACK(void) FNVMMR0CALLRING3NOTIFICATION(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser); 122 /** Pointer to a FNVMMR0CALLRING3NOTIFICATION(). */ 123 typedef FNVMMR0CALLRING3NOTIFICATION *PFNVMMR0CALLRING3NOTIFICATION; 113 124 114 125 /** … … 504 515 VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPU pVCpu); 505 516 VMMRZDECL(bool) VMMRZCallRing3IsEnabled(PVMCPU pVCpu); 517 VMMRZDECL(int) VMMRZCallRing3SetNotification(PVMCPU pVCpu, PFNVMMR0CALLRING3NOTIFICATION pfnCallback, void *pvUser); 518 VMMRZDECL(void) VMMRZCallRing3RemoveNotification(PVMCPU pVCpu); 506 519 /** @} */ 507 520 #endif -
trunk/src/VBox/VMM/Makefile.kmk
r44528 r45091 53 53 ifdef VBOX_WITH_DTRACE_RC 54 54 VMM_COMMON_DEFS += VBOX_WITH_DTRACE_RC 55 endif 56 ifdef VBOX_WITH_OLD_VTX_CODE 57 VMM_COMMON_DEFS += VBOX_WITH_OLD_VTX_CODE 55 58 endif 56 59 # VMM_COMMON_DEFS += VBOX_WITH_NS_ACCOUNTING_STATS … … 492 495 VMMR0/HMR0A.asm \ 493 496 VMMR0/HWSVMR0.cpp \ 494 VMMR0/HWVMXR0.cpp \495 497 VMMR0/PDMR0Device.cpp \ 496 498 VMMR0/PDMR0Driver.cpp \ … … 551 553 VMMAll/PDMAllNetShaper.cpp 552 554 endif 555 ifdef VBOX_WITH_OLD_VTX_CODE 556 VMMR0_SOURCES += VMMR0/HWVMXR0.cpp 557 else 558 VMMR0_SOURCES += VMMR0/HMVMXR0.cpp 559 endif 553 560 VMMR0_SOURCES.amd64 = \ 554 561 VMMR0/VMMR0JmpA-amd64.asm -
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r44577 r45091 505 505 if (g_HvmR0.vmx.fSupported) 506 506 { 507 /* Call the global VT-x initialization routine. */ 508 rc = VMXR0GlobalInit(); 509 if (RT_FAILURE(rc)) 510 g_HvmR0.lLastError = rc; 511 507 512 /* 508 513 * Install the VT-x methods. … … 523 528 * Timer Does Not Count Down at the Rate Specified" erratum. 524 529 */ 525 if ( g_HvmR0.vmx.msr.vmx_pin_ctls.n.allowed1 526 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER) 530 if (g_HvmR0.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER) 527 531 { 528 532 g_HvmR0.vmx.fUsePreemptTimer = true; … … 767 771 } 768 772 } 773 774 /** @todo This needs cleaning up. There's no matching hmR0TermIntel() and all 775 * the VT-x/AMD-V specific bits should move into their respective 776 * modules. */ 777 /* Finally, call global VT-x/AMD-V termination. */ 778 if (g_HvmR0.vmx.fSupported) 779 VMXR0GlobalTerm(); 780 769 781 return rc; 770 782 } -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r44968 r45091 209 209 } 210 210 211 VMMR0DECL(int) VMXR0GlobalInit(void) 212 { 213 /* Nothing to do. */ 214 return VINF_SUCCESS; 215 } 216 217 VMMR0DECL(void) VMXR0GlobalTerm(void) 218 { 219 /* Nothing to do. */ 220 } 211 221 212 222 /** … … 530 540 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, val); 531 541 AssertRC(rc); 542 pVCpu->hm.s.vmx.u32PinCtls = val; 532 543 533 544 /* -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.h
r44528 r45091 135 135 136 136 /** 137 * Does Ring-0 global VT-x initialization. 138 * 139 * @returns VBox status code. 140 * @param pVM Pointer to the VM. 141 */ 142 VMMR0DECL(int) VMXR0GlobalInit(void); 143 144 /** 145 * Does Ring-0 global VT-x termination. 146 * 147 * @returns VBox status code. 148 * @param pVM Pointer to the VM. 149 */ 150 VMMR0DECL(void) VMXR0GlobalTerm(void); 151 152 /** 137 153 * Does Ring-0 per VM VT-x initialization. 138 154 * … … 299 315 #endif 300 316 301 /** 302 * Return value of cached VMCS read for performance reasons (Darwin) and for 303 * running 64 bits guests on 32-bit hosts. 304 * 305 * @param idxField VMCS field index. 306 * @param pVal Value pointer (out). 307 */ 308 #ifdef VMX_USE_CACHED_VMCS_ACCESSES 309 # define VMXReadCachedVmcs(idxField, pVal) VMXReadCachedVmcsEx(pVCpu, idxField##_CACHE_IDX, pVal) 317 #ifdef VBOX_WITH_OLD_VTX_CODE 318 # ifdef VMX_USE_CACHED_VMCS_ACCESSES 319 # define VMXReadCachedVmcs(idxField, pVal) VMXReadCachedVmcsEx(pVCpu, idxField##_CACHE_IDX, pVal) 320 # else 321 # define VMXReadCachedVmcs VMXReadVmcsField 322 # endif 323 # define VMXReadVmcs VMXReadVmcsField 310 324 #else 311 # define VMXReadCachedVmcs(idxField, pVal) VMXReadVmcs(idxField, pVal) 325 # if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 326 # define VMXReadVmcsHstN VMXReadVmcs64 327 # define VMXReadVmcsGstN VMXReadVmcs64 328 # else 329 # define VMXReadVmcsHstN VMXReadVmcs32 330 # define VMXReadVmcsGstN(idxField, pVal) VMXReadCachedVmcsEx(pVCpu, idxField##_CACHE_IDX, pVal) 331 # endif 312 332 #endif 313 333 -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r44791 r45091 269 269 #endif /* VBOX_WITH_STATISTICS */ 270 270 271 #define VMX_REPORT_FEATURE(allowed1, disallowed0, featflag) \ 272 do { \ 273 if ((allowed1) & (featflag)) \ 274 LogRel(("HM: " #featflag "\n")); \ 275 else \ 276 LogRel(("HM: " #featflag " *must* be cleared\n")); \ 277 if ((disallowed0) & (featflag)) \ 278 LogRel(("HM: " #featflag " *must* be set\n")); \ 279 } while(0) 280 281 271 282 /******************************************************************************* 272 283 * Internal Functions * … … 826 837 { 827 838 uint64_t val; 839 uint64_t zap; 828 840 RTGCPHYS GCPhys = 0; 829 841 … … 839 851 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_pin_ctls.u)); 840 852 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; 841 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT) 842 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n")); 843 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT) 844 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n")); 845 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI) 846 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n")); 847 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER) 848 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n")); 849 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; 850 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT) 851 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n")); 852 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT) 853 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n")); 854 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI) 855 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n")); 856 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER) 857 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n")); 853 zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; 854 VMX_REPORT_FEATURE(val, zap, 
VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT); 855 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT); 856 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI); 857 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER); 858 858 859 859 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls.u)); 860 860 val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; 861 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT) 862 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT\n")); 863 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET) 864 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n")); 865 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT) 866 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n")); 867 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT) 868 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n")); 869 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT) 870 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n")); 871 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT) 872 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n")); 873 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT) 874 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n")); 875 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT) 876 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n")); 877 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT) 878 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n")); 879 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT) 880 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n")); 881 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT) 882 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n")); 883 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 884 LogRel(("HM: 
VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n")); 885 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT) 886 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n")); 887 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT) 888 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n")); 889 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT) 890 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n")); 891 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS) 892 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n")); 893 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG) 894 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n")); 895 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 896 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n")); 897 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT) 898 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n")); 899 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT) 900 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n")); 901 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 902 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n")); 903 904 val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; 905 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT) 906 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT *must* be set\n")); 907 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET) 908 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n")); 909 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT) 910 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n")); 911 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT) 912 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n")); 913 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT) 914 LogRel(("HM: 
VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n")); 915 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT) 916 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n")); 917 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT) 918 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n")); 919 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT) 920 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n")); 921 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT) 922 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n")); 923 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT) 924 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n")); 925 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT) 926 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n")); 927 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 928 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n")); 929 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT) 930 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n")); 931 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT) 932 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n")); 933 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT) 934 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n")); 935 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS) 936 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n")); 937 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG) 938 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n")); 939 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 940 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n")); 941 if (val & 
VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT) 942 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n")); 943 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT) 944 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n")); 945 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 946 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n")); 947 861 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; 862 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT); 863 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET); 864 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT); 865 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT); 866 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT); 867 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT); 868 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT); 869 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT); 870 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT); 871 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT); 872 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT); 873 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW); 874 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT); 875 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT); 876 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT); 877 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS); 878 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG); 879 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS); 880 VMX_REPORT_FEATURE(val, zap, 
VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT); 881 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT); 882 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL); 948 883 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 949 884 { 950 885 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.u)); 951 886 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; 952 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 953 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n")); 954 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT) 955 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n")); 956 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT) 957 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT\n")); 958 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 959 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP\n")); 960 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC) 961 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC\n")); 962 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID) 963 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n")); 964 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT) 965 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n")); 966 if (val & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST) 967 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST\n")); 968 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT) 969 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n")); 970 971 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; 972 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 973 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n")); 974 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT) 975 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT *must* be set\n")); 976 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 977 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP *must* be set\n")); 978 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC) 979 
LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC *must* be set\n")); 980 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT) 981 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n")); 982 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID) 983 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n")); 984 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT) 985 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n")); 986 if (val & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST) 987 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST *must* be set\n")); 988 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT) 989 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n")); 887 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; 888 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC); 889 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_EPT); 890 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT); 891 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP); 892 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC); 893 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VPID); 894 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT); 895 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST); 896 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT); 897 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT); 898 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_INVPCID); 899 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC); 990 900 } 991 901 992 902 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_entry.u)); 993 903 val = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; 994 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG) 995 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n")); 996 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST) 997 LogRel(("HM: 
VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST\n")); 998 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM) 999 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n")); 1000 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON) 1001 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n")); 1002 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR) 1003 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n")); 1004 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR) 1005 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n")); 1006 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR) 1007 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n")); 1008 val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; 1009 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG) 1010 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n")); 1011 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST) 1012 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST *must* be set\n")); 1013 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM) 1014 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n")); 1015 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON) 1016 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n")); 1017 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR) 1018 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n")); 1019 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR) 1020 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n")); 1021 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR) 1022 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n")); 904 zap = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; 905 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG); 906 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST); 907 
VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM); 908 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON); 909 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR); 910 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR); 911 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR); 1023 912 1024 913 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_exit.u)); 1025 914 val = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; 1026 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG) 1027 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n")); 1028 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE) 1029 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE\n")); 1030 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT) 1031 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT\n")); 1032 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR) 1033 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n")); 1034 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR) 1035 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n")); 1036 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR) 1037 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n")); 1038 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR) 1039 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n")); 1040 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER) 1041 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n")); 1042 val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; 1043 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG) 1044 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n")); 1045 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE) 1046 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE *must* be set\n")); 1047 if (val & 
VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT) 1048 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT *must* be set\n")); 1049 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR) 1050 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n")); 1051 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR) 1052 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n")); 1053 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR) 1054 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n")); 1055 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR) 1056 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n")); 1057 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER) 1058 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n")); 915 zap = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; 916 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG); 917 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE); 918 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_PERF_MSR); 919 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT); 920 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR); 921 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR); 922 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR); 923 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR); 924 VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER); 1059 925 1060 926 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps) … … 1263 1129 if (pVM->hm.s.fNestedPaging) 1264 1130 { 1265 LogRel(("HM: Enabled nested paging\n"));1131 LogRel(("HM: Nested paging enabled!\n")); 1266 1132 LogRel(("HM: EPT root page = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM)))); 1267 1133 if (pVM->hm.s.vmx.enmFlushEpt == 
VMX_FLUSH_EPT_SINGLE_CONTEXT) 1268 LogRel(("HM: enmFlushEpt= VMX_FLUSH_EPT_SINGLE_CONTEXT\n"));1134 LogRel(("HM: EPT flush type = VMX_FLUSH_EPT_SINGLE_CONTEXT\n")); 1269 1135 else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_ALL_CONTEXTS) 1270 LogRel(("HM: enmFlushEpt= VMX_FLUSH_EPT_ALL_CONTEXTS\n"));1136 LogRel(("HM: EPT flush type = VMX_FLUSH_EPT_ALL_CONTEXTS\n")); 1271 1137 else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_NOT_SUPPORTED) 1272 LogRel(("HM: enmFlushEpt= VMX_FLUSH_EPT_NOT_SUPPORTED\n"));1138 LogRel(("HM: EPT flush type = VMX_FLUSH_EPT_NOT_SUPPORTED\n")); 1273 1139 else 1274 LogRel(("HM: enmFlushEpt= %d\n", pVM->hm.s.vmx.enmFlushEpt));1140 LogRel(("HM: EPT flush type = %d\n", pVM->hm.s.vmx.enmFlushEpt)); 1275 1141 1276 1142 if (pVM->hm.s.vmx.fUnrestrictedGuest) … … 1291 1157 if (pVM->hm.s.vmx.fVpid) 1292 1158 { 1293 LogRel(("HM: Enabled VPID\n"));1159 LogRel(("HM: VPID enabled!\n")); 1294 1160 if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_INDIV_ADDR) 1295 LogRel(("HM: enmFlushVpid= VMX_FLUSH_VPID_INDIV_ADDR\n"));1161 LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_INDIV_ADDR\n")); 1296 1162 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT) 1297 LogRel(("HM: enmFlushVpid= VMX_FLUSH_VPID_SINGLE_CONTEXT\n"));1163 LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_SINGLE_CONTEXT\n")); 1298 1164 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_ALL_CONTEXTS) 1299 LogRel(("HM: enmFlushVpid= VMX_FLUSH_VPID_ALL_CONTEXTS\n"));1165 LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_ALL_CONTEXTS\n")); 1300 1166 else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS) 1301 LogRel(("HM: enmFlushVpid= VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));1167 LogRel(("HM: VPID flush type = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n")); 1302 1168 else 1303 LogRel(("HM: enmFlushVpid= %d\n", pVM->hm.s.vmx.enmFlushVpid));1169 LogRel(("HM: VPID flush type = %d\n", pVM->hm.s.vmx.enmFlushVpid)); 1304 1170 } 1305 1171 else 
if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_NOT_SUPPORTED) … … 1438 1304 if (pVM->hm.s.fNestedPaging) 1439 1305 { 1440 LogRel(("HM: Enabled nested paging \n"));1306 LogRel(("HM: Enabled nested paging!\n")); 1441 1307 #if HC_ARCH_BITS == 64 1442 1308 if (pVM->hm.s.fLargePages) … … 1580 1446 pVCpu->hm.s.enmShadowMode = enmShadowMode; 1581 1447 1448 #ifdef VBOX_WITH_OLD_VTX_CODE 1582 1449 if ( pVM->hm.s.vmx.fEnabled 1583 1450 && pVM->fHMEnabled) … … 1595 1462 } 1596 1463 } 1464 #endif 1597 1465 1598 1466 if (pVCpu->hm.s.vmx.enmCurrGuestMode != enmGuestMode) … … 1611 1479 } 1612 1480 1481 /** @todo r=ramshankar: Why do we need to do this? Most likely 1482 * VBOX_WITH_OLD_VTX_CODE only. */ 1613 1483 /* Reset the contents of the read cache. */ 1614 1484 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; … … 2731 2601 2732 2602 case VERR_VMX_INVALID_VMCS_PTR: 2733 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs)); 2734 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32VMCSRevision)); 2735 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idEnteredCpu)); 2736 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idCurrentCpu)); 2603 LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n")); 2604 LogRel(("HM: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs)); 2605 LogRel(("HM: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32VMCSRevision)); 2606 LogRel(("HM: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idEnteredCpu)); 2607 LogRel(("HM: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idCurrentCpu)); 2737 2608 break; 2738 2609 2739 2610 case VERR_VMX_UNABLE_TO_START_VM: 2740 
LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32InstrError)); 2741 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32ExitReason)); 2611 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n")); 2612 LogRel(("HM: CPU%d instruction error %#x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32InstrError)); 2613 LogRel(("HM: CPU%d exit reason %#x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32ExitReason)); 2742 2614 if (pVM->aCpus[i].hm.s.vmx.lasterror.u32InstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS) 2743 2615 { 2744 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap)); 2616 LogRel(("HM: Cpu%d PinCtls %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32PinCtls)); 2617 LogRel(("HM: Cpu%d ProcCtls %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32ProcCtls)); 2618 LogRel(("HM: Cpu%d ProcCtls2 %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32ProcCtls2)); 2619 LogRel(("HM: Cpu%d EntryCtls %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32EntryCtls)); 2620 LogRel(("HM: Cpu%d ExitCtls %#RX32\n", i, pVM->aCpus[i].hm.s.vmx.u32ExitCtls)); 2621 LogRel(("HM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap)); 2745 2622 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 2746 LogRel((" VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysGuestMsr));2747 LogRel((" VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysHostMsr));2748 LogRel((" VERR_VMX_UNABLE_TO_START_VM: Cpu%d cGuestMSRs %x\n", i, pVM->aCpus[i].hm.s.vmx.cGuestMsrs));2623 LogRel(("HM: Cpu%d GuestMSRPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysGuestMsr)); 2624 LogRel(("HM: Cpu%d HostMsrPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysHostMsr)); 2625 LogRel(("HM: Cpu%d cGuestMSRs %u\n", i, pVM->aCpus[i].hm.s.vmx.cGuestMsrs)); 2749 2626 #endif 2750 2627 } … … 2755 2632 2756 2633 case VERR_VMX_UNABLE_TO_RESUME_VM: 2757 
LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32InstrError)); 2758 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32ExitReason)); 2634 LogRel(("HM: VERR_VMX_UNABLE_TO_RESUME_VM:\n")); 2635 LogRel(("HM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32InstrError)); 2636 LogRel(("HM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u32ExitReason)); 2759 2637 break; 2760 2638 -
trunk/src/VBox/VMM/VMMRZ/VMMRZ.cpp
r44529 r45091 86 86 pVM->vmm.s.pfnRCToHost(VINF_VMM_CALL_HOST); 87 87 #else 88 if (pVCpu->vmm.s.pfnCallRing3CallbackR0) 89 pVCpu->vmm.s.pfnCallRing3CallbackR0(pVCpu, enmOperation, pVCpu->vmm.s.pvCallRing3CallbackUserR0); 88 90 int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST); 89 91 if (RT_FAILURE(rc)) … … 175 177 } 176 178 179 180 /** 181 * Sets the ring-0 callback before doing the ring-3 call. 182 * 183 * @param pVCpu Pointer to the VMCPU. 184 * @param pfnCallback Pointer to the callback. 185 * @param pvUser The user argument. 186 * 187 * @return VBox status code. 188 */ 189 VMMRZDECL(int) VMMRZCallRing3SetNotification(PVMCPU pVCpu, PFNVMMR0CALLRING3NOTIFICATION pfnCallback, void *pvUser) 190 { 191 AssertReturn(pVCpu, VERR_INVALID_POINTER); 192 AssertReturn(pfnCallback, VERR_INVALID_POINTER); 193 194 if (pVCpu->vmm.s.pfnCallRing3CallbackR0) 195 return VERR_ALREADY_EXISTS; 196 197 pVCpu->vmm.s.pfnCallRing3CallbackR0 = pfnCallback; 198 pVCpu->vmm.s.pvCallRing3CallbackUserR0 = pvUser; 199 return VINF_SUCCESS; 200 } 201 202 203 /** 204 * Removes the ring-0 callback. 205 * 206 * @param pVCpu Pointer to the VMCPU. 207 */ 208 VMMRZDECL(void) VMMRZCallRing3RemoveNotification(PVMCPU pVCpu) 209 { 210 pVCpu->vmm.s.pfnCallRing3CallbackR0 = NULL; 211 } 212 -
trunk/src/VBox/VMM/include/HMInternal.h
r44803 r45091 38 38 #endif 39 39 40 #define VMX_USE_CACHED_VMCS_ACCESSES 40 #ifdef VBOX_WITH_OLD_VTX_CODE 41 # define VMX_USE_CACHED_VMCS_ACCESSES 42 #elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 43 # define VMX_USE_CACHED_VMCS_ACCESSES 44 #endif 45 41 46 #define HM_VMX_EMULATE_REALMODE 42 47 … … 69 74 * @{ 70 75 */ 71 # define HM_CHANGED_GUEST_FPU RT_BIT(0)72 # define HM_CHANGED_GUEST_CR0 RT_BIT(1)73 # define HM_CHANGED_GUEST_CR3 RT_BIT(2)74 # define HM_CHANGED_GUEST_CR4 RT_BIT(3)75 # define HM_CHANGED_GUEST_GDTR RT_BIT(4)76 # define HM_CHANGED_GUEST_IDTR RT_BIT(5)77 # define HM_CHANGED_GUEST_LDTR RT_BIT(6)78 # define HM_CHANGED_GUEST_TR RT_BIT(7)79 # define HM_CHANGED_GUEST_MSR RT_BIT(8)80 # define HM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(9)81 # define HM_CHANGED_GUEST_DEBUG RT_BIT(10)82 # define HM_CHANGED_HOST_CONTEXT RT_BIT(11)83 84 # define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_SEGMENT_REGS \76 #ifdef VBOX_WITH_OLD_VTX_CODE 77 # define HM_CHANGED_GUEST_FPU RT_BIT(0) 78 # define HM_CHANGED_GUEST_CR0 RT_BIT(1) 79 # define HM_CHANGED_GUEST_CR3 RT_BIT(2) 80 # define HM_CHANGED_GUEST_CR4 RT_BIT(3) 81 # define HM_CHANGED_GUEST_GDTR RT_BIT(4) 82 # define HM_CHANGED_GUEST_IDTR RT_BIT(5) 83 # define HM_CHANGED_GUEST_LDTR RT_BIT(6) 84 # define HM_CHANGED_GUEST_TR RT_BIT(7) 85 # define HM_CHANGED_GUEST_MSR RT_BIT(8) 86 # define HM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(9) 87 # define HM_CHANGED_GUEST_DEBUG RT_BIT(10) 88 # define HM_CHANGED_HOST_CONTEXT RT_BIT(11) 89 # define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_SEGMENT_REGS \ 85 90 | HM_CHANGED_GUEST_CR0 \ 86 91 | HM_CHANGED_GUEST_CR3 \ … … 93 98 | HM_CHANGED_GUEST_DEBUG \ 94 99 | HM_CHANGED_GUEST_FPU) 100 #else 101 # define HM_CHANGED_GUEST_RIP RT_BIT(0) 102 # define HM_CHANGED_GUEST_RSP RT_BIT(1) 103 # define HM_CHANGED_GUEST_RFLAGS RT_BIT(2) 104 # define HM_CHANGED_GUEST_FPU RT_BIT(3) 105 # define HM_CHANGED_GUEST_CR0 RT_BIT(4) 106 # define HM_CHANGED_GUEST_CR2 RT_BIT(5) 107 # define 
HM_CHANGED_GUEST_CR3 RT_BIT(6) 108 # define HM_CHANGED_GUEST_CR4 RT_BIT(7) 109 # define HM_CHANGED_GUEST_GDTR RT_BIT(8) 110 # define HM_CHANGED_GUEST_IDTR RT_BIT(9) 111 # define HM_CHANGED_GUEST_LDTR RT_BIT(10) 112 # define HM_CHANGED_GUEST_TR RT_BIT(11) 113 # define HM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(12) 114 # define HM_CHANGED_GUEST_DEBUG RT_BIT(13) 115 # define HM_CHANGED_GUEST_FS_BASE_MSR RT_BIT(14) 116 # define HM_CHANGED_GUEST_GS_BASE_MSR RT_BIT(15) 117 # define HM_CHANGED_GUEST_SYSENTER_CS_MSR RT_BIT(16) 118 # define HM_CHANGED_GUEST_SYSENTER_EIP_MSR RT_BIT(17) 119 # define HM_CHANGED_GUEST_SYSENTER_ESP_MSR RT_BIT(18) 120 # define HM_CHANGED_GUEST_INTR_STATE RT_BIT(19) 121 # define HM_CHANGED_VMX_GUEST_AUTO_MSRS RT_BIT(20) 122 # define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE RT_BIT(21) 123 # define HM_CHANGED_VMX_GUEST_APIC_STATE RT_BIT(22) 124 # define HM_CHANGED_VMX_ENTRY_CTLS RT_BIT(23) 125 # define HM_CHANGED_VMX_EXIT_CTLS RT_BIT(24) 126 127 # define HM_CHANGED_HOST_CONTEXT RT_BIT(25) 128 129 # define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_RIP \ 130 | HM_CHANGED_GUEST_RSP \ 131 | HM_CHANGED_GUEST_RFLAGS \ 132 | HM_CHANGED_GUEST_FPU \ 133 | HM_CHANGED_GUEST_CR0 \ 134 | HM_CHANGED_GUEST_CR2 \ 135 | HM_CHANGED_GUEST_CR3 \ 136 | HM_CHANGED_GUEST_CR4 \ 137 | HM_CHANGED_GUEST_GDTR \ 138 | HM_CHANGED_GUEST_IDTR \ 139 | HM_CHANGED_GUEST_LDTR \ 140 | HM_CHANGED_GUEST_TR \ 141 | HM_CHANGED_GUEST_SEGMENT_REGS \ 142 | HM_CHANGED_GUEST_DEBUG \ 143 | HM_CHANGED_GUEST_FS_BASE_MSR \ 144 | HM_CHANGED_GUEST_GS_BASE_MSR \ 145 | HM_CHANGED_GUEST_SYSENTER_CS_MSR \ 146 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \ 147 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \ 148 | HM_CHANGED_GUEST_INTR_STATE \ 149 | HM_CHANGED_VMX_GUEST_AUTO_MSRS \ 150 | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \ 151 | HM_CHANGED_VMX_GUEST_APIC_STATE \ 152 | HM_CHANGED_VMX_ENTRY_CTLS \ 153 | HM_CHANGED_VMX_EXIT_CTLS) 154 #endif 95 155 96 156 #define HM_CHANGED_ALL (HM_CHANGED_ALL_GUEST | HM_CHANGED_HOST_CONTEXT) … … 537 597 
PFNHMVMXSTARTVM pfnStartVM; 538 598 599 uint32_t u32Alignment1; 539 600 #if HC_ARCH_BITS == 32 540 uint32_t u32Alignment; 541 #endif 542 601 uint32_t u32Alignment2; 602 #endif 603 604 /** Current VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS. */ 605 uint32_t u32PinCtls; 543 606 /** Current VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS. */ 544 607 uint32_t u32ProcCtls; … … 607 670 struct 608 671 { 672 X86DESCATTR uAttrCS; 673 X86DESCATTR uAttrDS; 674 X86DESCATTR uAttrES; 675 X86DESCATTR uAttrFS; 676 X86DESCATTR uAttrGS; 677 X86DESCATTR uAttrSS; 609 678 X86EFLAGS eflags; 610 679 uint32_t fRealOnV86Active; … … 741 810 STAMCOUNTER StatExitShadowNM; 742 811 STAMCOUNTER StatExitGuestNM; 743 STAMCOUNTER StatExitShadowPF; 812 STAMCOUNTER StatExitShadowPF; /* Misleading, currently used for MMIO #PFs as well. */ 744 813 STAMCOUNTER StatExitShadowPFEM; 745 814 STAMCOUNTER StatExitGuestPF; -
trunk/src/VBox/VMM/include/VMMInternal.h
r44528 r45091 22 22 #include <VBox/sup.h> 23 23 #include <VBox/vmm/stam.h> 24 #include <VBox/vmm/vmm.h> 24 25 #include <VBox/log.h> 25 26 #include <iprt/critsect.h> 26 27 27 28 28 #if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC) … … 48 48 * #if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk. 49 49 */ 50 #if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(D OXYGEN_RUNNING)50 #if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING) 51 51 # define VBOX_WITH_R0_LOGGING 52 52 #endif … … 450 450 * anything that needs to be accessed from assembly after it. */ 451 451 VMMR0JMPBUF CallRing3JmpBufR0; 452 /** The Ring-0 notification callback. */ 453 PFNVMMR0CALLRING3NOTIFICATION pfnCallRing3CallbackR0; 454 /** The Ring-0 notification callback user argument. */ 455 void *pvCallRing3CallbackUserR0; 452 456 /** @} */ 453 454 457 } VMMCPU; 455 458 AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8); -
trunk/src/VBox/VMM/include/VMMInternal.mac
r44528 r45091 125 125 .u64CallRing3Arg resq 1 126 126 ; .CallRing3JmpBufR0 resb no-can-do 127 127 ; .pfnCallRing3CallbackR0 128 ; .pvCallRing3CallbackUserR0 128 129 endstruc 129 130
Note:
See TracChangeset
for help on using the changeset viewer.