Changeset 57477 in vbox for trunk/src/VBox/VMM
- Timestamp: Aug 20, 2015 2:20:26 PM
- Location: trunk/src/VBox/VMM
- Files: 4 edited
Legend:
- Unmodified (context) lines are prefixed with a space
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r57470 → r57477)

@@ -297 +297 @@
 static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
 static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
+static FNSVMEXITHANDLER hmR0SvmExitPause;
 static FNSVMEXITHANDLER hmR0SvmExitIret;
 static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
@@ -674 +675 @@
     AssertReturn(pVM, VERR_INVALID_PARAMETER);
     Assert(pVM->hm.s.svm.fSupported);
+
+    bool const fPauseFilter          = RT_BOOL(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
+    bool const fPauseFilterThreshold = RT_BOOL(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
+    bool const fUsePauseFilter       = fPauseFilter && pVM->hm.s.svm.cPauseFilter && pVM->hm.s.svm.cPauseFilterThresholdTicks;
 
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
@@ -794 +799 @@
        if (pVCpu->hm.s.fGIMTrapXcptUD)
            pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_UD);
+
+       /* Setup Pause Filter for guest pause-loop (spinlock) exiting. */
+       if (fUsePauseFilter)
+       {
+           pVmcb->ctrl.u16PauseFilterCount = pVM->hm.s.svm.cPauseFilter;
+           if (fPauseFilterThreshold)
+               pVmcb->ctrl.u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
+       }
 
        /*
@@ -3545 +3558 @@
        case SVM_EXIT_WRITE_CR8:
            return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
+
+       case SVM_EXIT_PAUSE:
+           return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient);
 
        case SVM_EXIT_VMMCALL:
@@ -5153 +5169 @@
 
 /**
+ * #VMEXIT handler for PAUSE (SVM_EXIT_PAUSE). Conditional #VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitPause(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
+    return VINF_EM_RAW_INTERRUPT;
+}
+
+
+/**
  * #VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional #VMEXIT.
  */
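In brief, the mechanism the new VMCB fields program: each guest PAUSE decrements u16PauseFilterCount, and a #VMEXIT (SVM_EXIT_PAUSE) is raised when it reaches zero; with the threshold extension, the count is reset whenever two PAUSEs are further apart than u16PauseFilterThreshold ticks, so only tight spin loops ever drain the counter. A rough standalone C model of that logic (illustrative only, not VirtualBox code; the type/field names and the rearm-after-exit behaviour are assumptions here, the authoritative description is in AMD's APM vol. 2):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the SVM pause filter: a count of PAUSEs, a reload value
   and a TSC threshold that separates "same spin loop" from "new loop". */
typedef struct PAUSEFILTER
{
    uint16_t cLeft;           /* current count (u16PauseFilterCount)      */
    uint16_t cReload;         /* value the count is reset/rearmed to      */
    uint16_t cThresholdTicks; /* max TSC delta treated as the same loop   */
    uint64_t uLastTsc;        /* TSC of the previous PAUSE                */
} PAUSEFILTER;

/* Returns 1 when the hardware would raise SVM_EXIT_PAUSE. */
static int pauseFilterOnPause(PAUSEFILTER *pFilter, uint64_t uTsc)
{
    if (uTsc - pFilter->uLastTsc > pFilter->cThresholdTicks)
        pFilter->cLeft = pFilter->cReload;  /* gap too large: new loop, reset */
    pFilter->uLastTsc = uTsc;
    if (--pFilter->cLeft == 0)
    {
        pFilter->cLeft = pFilter->cReload;  /* rearm after the exit (model choice) */
        return 1;                           /* counter exhausted -> #VMEXIT */
    }
    return 0;
}

int main(void)
{
    PAUSEFILTER Filter = { 3, 3, 100, 0 };  /* count 3, threshold 100 ticks */
    uint64_t uTsc = 0;
    for (int i = 1; i <= 8; i++)
    {
        uTsc += 10;                         /* tight loop: PAUSEs 10 ticks apart */
        if (pauseFilterOnPause(&Filter, uTsc))
            printf("PAUSE #%d would cause SVM_EXIT_PAUSE\n", i);
    }
    return 0;
}

With the demo values the model reports an exit on every third PAUSE; a guest that pauses less often than the threshold would never trigger one.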
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r57472 → r57477)

@@ -2445 +2445 @@
        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
            val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;    /* Enable RDTSCP support. */
+
+       if (   pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
+           && pVM->hm.s.vmx.cPleGapTicks
+           && pVM->hm.s.vmx.cPleWindowTicks)
+       {
+           val |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;    /* Enable pause-loop exiting. */
+
+           rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
+           AssertRCReturn(rc, rc);
+
+           rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
+           AssertRCReturn(rc, rc);
+       }
 
        if ((val & zap) != val)
@@ -10735 +10748 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
-    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
-        return VERR_EM_INTERPRETER;
-    AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
-    HMVMX_RETURN_UNEXPECTED_EXIT();
+    return VINF_EM_RAW_INTERRUPT;
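The Intel variant programmed above is time based rather than count based: PAUSEs closer together than the PLE gap are treated as one spin loop, and once such a loop has been running longer than the PLE window the CPU forces a VM-exit. A comparable standalone C sketch (again illustrative only, not VirtualBox code; the exact rules live in the Intel SDM):

#include <stdint.h>
#include <stdio.h>

/* Toy model of VMX pause-loop exiting: gap bounds the distance between
   PAUSEs of one loop, window bounds how long the loop may spin. */
typedef struct PLESTATE
{
    uint32_t cGapTicks;     /* VMX_VMCS32_CTRL_PLE_GAP                  */
    uint32_t cWindowTicks;  /* VMX_VMCS32_CTRL_PLE_WINDOW               */
    uint64_t uFirstTsc;     /* TSC of the first PAUSE of the current loop */
    uint64_t uLastTsc;      /* TSC of the previous PAUSE                */
} PLESTATE;

/* Returns 1 when the hardware would force a PAUSE VM-exit. */
static int pleOnPause(PLESTATE *pPle, uint64_t uTsc)
{
    if (uTsc - pPle->uLastTsc > pPle->cGapTicks)
        pPle->uFirstTsc = uTsc;             /* gap exceeded: a new loop starts */
    pPle->uLastTsc = uTsc;
    if (uTsc - pPle->uFirstTsc > pPle->cWindowTicks)
    {
        pPle->uFirstTsc = uTsc;             /* rearm after the exit (model choice) */
        return 1;                           /* loop spun past the window -> VM-exit */
    }
    return 0;
}

int main(void)
{
    PLESTATE Ple = { 64, 512, 0, 0 };       /* illustrative gap/window values */
    for (uint64_t uTsc = 32; uTsc <= 2048; uTsc += 32)  /* PAUSEs 32 ticks apart */
        if (pleOnPause(&Ple, uTsc))
            printf("PAUSE at TSC %llu would cause a VM-exit\n",
                   (unsigned long long)uTsc);
    return 0;
}

With the illustrative gap/window of 64/512 ticks and PAUSEs 32 ticks apart, the model exits after roughly every 544 ticks of continuous spinning.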
trunk/src/VBox/VMM/VMMR3/HM.cpp (r57467 → r57477)

@@ -420 +420 @@
 #endif
 
+    /** @cfgm{/HM/VmxPleGap, uint32_t, 0}
+     * The pause-loop exiting gap in TSC ticks. When the number of ticks between
+     * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
+     * latest PAUSE instruction to be the start of a new PAUSE loop.
+     */
+    rc = CFGMR3QueryU32Def(pCfgHM, "VmxPleGap", &pVM->hm.s.vmx.cPleGapTicks, 0);
+    AssertRCReturn(rc, rc);
+
+    /** @cfgm{/HM/VmxPleWindow, uint32_t, 0}
+     * The pause-loop exiting window in TSC ticks. When the number of ticks
+     * between the current PAUSE instruction and the first PAUSE of the loop
+     * exceeds VmxPleWindow, a VM-exit is triggered.
+     *
+     * Setting both VmxPleGap and VmxPleWindow to 0 disables pause-loop exiting.
+     */
+    rc = CFGMR3QueryU32Def(pCfgHM, "VmxPleWindow", &pVM->hm.s.vmx.cPleWindowTicks, 0);
+    AssertRCReturn(rc, rc);
+
+    /** @cfgm{/HM/SvmPauseFilterCount, uint16_t, 0}
+     * A counter that is decremented each time a PAUSE instruction is executed by
+     * the guest. When the counter reaches 0, a #VMEXIT is triggered.
+     */
+    rc = CFGMR3QueryU16Def(pCfgHM, "SvmPauseFilterCount", &pVM->hm.s.svm.cPauseFilter, 0);
+    AssertRCReturn(rc, rc);
+
+    /** @cfgm{/HM/SvmPauseFilterThreshold, uint16_t, 0}
+     * The pause filter threshold in ticks. When the elapsed time between two
+     * successive PAUSE instructions exceeds SvmPauseFilterThreshold, the pause
+     * filter count is reset to its initial value. However, if PAUSE is executed
+     * PauseFilter times within PauseFilterThreshold ticks, a VM-exit will be
+     * triggered.
+     *
+     * Setting both SvmPauseFilterCount and SvmPauseFilterThreshold to 0 disables
+     * pause-filter exiting.
+     */
+    rc = CFGMR3QueryU16Def(pCfgHM, "SvmPauseFilterThreshold", &pVM->hm.s.svm.cPauseFilterThresholdTicks, 0);
+    AssertRCReturn(rc, rc);
+
     /** @cfgm{/HM/Exclusive, bool}
      * Determines the init method for AMD-V and VT-x. If set to true, HM will do a …
@@ -1141 +1178 @@
     LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", val));
     HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_W_ONLY);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_WX_ONLY);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_21_BITS);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_30_BITS);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_39_BITS);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_48_BITS);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_GAW_57_BITS);
+    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
     HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WC);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WT);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WP);
     HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_21_BITS);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_30_BITS);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_39_BITS);
-    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_SP_48_BITS);
+    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
+    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
     HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
+    HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY);
     HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
     HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
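Both pairs of keys default to 0, so neither feature is enabled unless explicitly configured. Assuming the standard VBoxInternal/ extradata-to-CFGM mapping covers the /HM subtree (an assumption; "MyVM" and the numeric values below are placeholders, not recommended defaults), they could be set per VM roughly like this:

VBoxManage setextradata "MyVM" "VBoxInternal/HM/VmxPleGap"                128
VBoxManage setextradata "MyVM" "VBoxInternal/HM/VmxPleWindow"            4096
VBoxManage setextradata "MyVM" "VBoxInternal/HM/SvmPauseFilterCount"     3000
VBoxManage setextradata "MyVM" "VBoxInternal/HM/SvmPauseFilterThreshold" 1000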
trunk/src/VBox/VMM/include/HMInternal.h (r57470 → r57477)

@@ -432 +432 @@
     /** Internal Id of which flush-handler to use for tagged-TLB entries. */
     uint32_t                    uFlushTaggedTlb;
+
+    /** Pause-loop exiting (PLE) gap in ticks. */
+    uint32_t                    cPleGapTicks;
+    /** Pause-loop exiting (PLE) window in ticks. */
+    uint32_t                    cPleWindowTicks;
     uint32_t                    u32Alignment0;
+
     /** Host CR4 value (set by ring-0 VMX init) */
     uint64_t                    u64HostCr4;
@@ -442 +448 @@
     uint8_t                     u8Alignment2[7];
 
-    /** VMX MSR values */
+    /** VMX MSR values. */
     VMXMSRS                     Msrs;
 
@@ -480 +486 @@
     /** SVM feature bits from cpuid 0x8000000a */
     uint32_t                    u32Features;
+
+    /** Pause filter counter. */
+    uint16_t                    cPauseFilter;
+    /** Pause filter threshold in ticks. */
+    uint16_t                    cPauseFilterThresholdTicks;
+    uint32_t                    u32Alignment0;
 } svm;
 
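A note on the explicit u32Alignment0 padding next to the two new uint16_t members: HMInternal.h spells its padding out so the structure layout is identical across compilers and contexts rather than left to implicit rules. A hypothetical C11 mirror of just the new SVM tail members (names copied from the diff; the surrounding structure and VirtualBox's own AssertCompile macros are omitted, static_assert stands in for them):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical mirror of the new members: the two uint16_t counters pack
   into a single 32-bit slot, and the explicit pad keeps the size a fixed,
   hole-free 12 bytes on every common ABI. */
struct SvmPauseCfg
{
    uint32_t u32Features;                 /* SVM feature bits from CPUID 0x8000000a */
    uint16_t cPauseFilter;                /* pause filter counter                   */
    uint16_t cPauseFilterThresholdTicks;  /* pause filter threshold in ticks        */
    uint32_t u32Alignment0;               /* explicit padding, no implicit holes    */
};

static_assert(offsetof(struct SvmPauseCfg, cPauseFilter) == 4, "no hole after u32Features");
static_assert(offsetof(struct SvmPauseCfg, cPauseFilterThresholdTicks) == 6, "counters pack into one dword");
static_assert(sizeof(struct SvmPauseCfg) == 12, "size fixed by explicit padding");

int main(void)
{
    return 0;  /* layout is verified entirely at compile time */
}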