Changeset 75646 in vbox for trunk/src/VBox
- Timestamp: Nov 21, 2018 3:38:10 PM
- Location: trunk/src/VBox/VMM
- Files: 9 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r75561 → r75646) Adds CPUMGetGuestInterruptibility(), inserted ahead of the existing "Checks whether the VMX nested-guest is in a state to receive physical (APIC) interrupts" helper:

    /**
     * Calculates the interruptibility of the guest.
     *
     * @returns Interruptibility level.
     * @param   pVCpu   The cross context virtual CPU structure.
     */
    VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
    {
        if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
        {
            if (pVCpu->cpum.s.Guest.hwvirt.fGif)
            {
                if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
                    return CPUMINTERRUPTIBILITY_UNRESTRAINED;

                /** @todo does blocking NMIs mean interrupts are also inhibited? */
                if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
                {
                    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
                        return CPUMINTERRUPTIBILITY_INT_INHIBITED;
                    return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
                }
                AssertFailed();
                return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
            }
            return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
        }
        else
        {
            if (pVCpu->cpum.s.Guest.hwvirt.fGif)
            {
                if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
                    return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
                return CPUMINTERRUPTIBILITY_INT_DISABLED;
            }
            return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
        }
    }
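A compact way to read the decision tree CPUMGetGuestInterruptibility() encodes is as a pure function of four bits: RFLAGS.IF, the (SVM) global interrupt flag, the interrupt-inhibit shadow and NMI blocking. The sketch below is self-contained and uses invented SKETCH* names; the real function reads these from the VMCPU state and force-action flags, as shown above.

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-in for VirtualBox's CPUMINTERRUPTIBILITY enum (values illustrative). */
    typedef enum SKETCHINTERRUPTIBILITY
    {
        SKETCH_UNRESTRAINED,    /* IF=1, GIF=1, nothing inhibited.           */
        SKETCH_INT_INHIBITED,   /* interrupt shadow (e.g. after STI/MOV SS). */
        SKETCH_INT_DISABLED,    /* IF=0.                                     */
        SKETCH_NMI_INHIBIT,     /* NMIs blocked (inside an NMI handler).     */
        SKETCH_GLOBAL_INHIBIT   /* GIF=0 (SVM global interrupt flag clear).  */
    } SKETCHINTERRUPTIBILITY;

    /* Pure-function rendering of the decision tree in CPUMGetGuestInterruptibility(). */
    static SKETCHINTERRUPTIBILITY sketchGetInterruptibility(bool fIf, bool fGif,
                                                            bool fInhibitInt, bool fBlockNmis)
    {
        if (!fGif)
            return SKETCH_GLOBAL_INHIBIT;
        if (fIf)
        {
            if (!fInhibitInt && !fBlockNmis)
                return SKETCH_UNRESTRAINED;
            if (fInhibitInt && !fBlockNmis)
                return SKETCH_INT_INHIBITED;
            return SKETCH_NMI_INHIBIT;
        }
        return fBlockNmis ? SKETCH_NMI_INHIBIT : SKETCH_INT_DISABLED;
    }

    int main(void)
    {
        printf("IF=1 GIF=1 -> %d\n", sketchGetInterruptibility(true,  true,  false, false));
        printf("IF=0 GIF=1 -> %d\n", sketchGetInterruptibility(false, true,  false, false));
        printf("GIF=0      -> %d\n", sketchGetInterruptibility(true,  false, false, false));
        return 0;
    }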
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
(r74789 → r75646) Adds two query functions. EMIsInhibitInterruptsActive() goes in ahead of the "Enables / disables hypercall instructions" section:

    /**
     * Checks if interrupt inhibiting is enabled for the current instruction.
     *
     * @returns true if interrupts are inhibited, false if not.
     * @param   pVCpu   The cross context virtual CPU structure.
     */
    VMMDECL(bool) EMIsInhibitInterruptsActive(PVMCPU pVCpu)
    {
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
            return false;
        if (pVCpu->em.s.GCPtrInhibitInterrupts == CPUMGetGuestRIP(pVCpu))
            return true;
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        return false;
    }

EMMonitorWaitIsActive() follows the existing EMMonitorIsArmed() (which returns RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE)):

    /**
     * Checks if we're in a MWAIT.
     *
     * @retval  1 if regular,
     * @retval  > 1 if MWAIT with EMMWAIT_FLAG_BREAKIRQIF0
     * @retval  0 if not armed
     * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
     */
    VMM_INT_DECL(unsigned) EMMonitorWaitIsActive(PVMCPU pVCpu)
    {
        uint32_t fWait = pVCpu->em.s.MWait.fWait;
        AssertCompile(EMMWAIT_FLAG_ACTIVE == 1);
        AssertCompile(EMMWAIT_FLAG_BREAKIRQIF0 == 2);
        AssertCompile((EMMWAIT_FLAG_ACTIVE << 1) == EMMWAIT_FLAG_BREAKIRQIF0);
        return fWait & (EMMWAIT_FLAG_ACTIVE | ((fWait & EMMWAIT_FLAG_ACTIVE) << 1));
    }
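The return-value trick in EMMonitorWaitIsActive() relies on EMMWAIT_FLAG_ACTIVE being bit 0 and EMMWAIT_FLAG_BREAKIRQIF0 being bit 1 (hence the AssertCompile statements): the BREAKIRQIF0 bit only survives the mask when the ACTIVE bit is set, so the result is 0 (not armed), 1 (plain MWAIT) or 3 (MWAIT armed with the break-on-IRQ-even-if-IF=0 hint). A stand-alone demonstration with stand-in names mirroring those flag values:

    #include <stdio.h>
    #include <stdint.h>

    #define SKETCH_MWAIT_ACTIVE       UINT32_C(1)  /* mirrors EMMWAIT_FLAG_ACTIVE == 1      */
    #define SKETCH_MWAIT_BREAKIRQIF0  UINT32_C(2)  /* mirrors EMMWAIT_FLAG_BREAKIRQIF0 == 2 */

    /* Same masking trick as EMMonitorWaitIsActive(): BREAKIRQIF0 only survives
       when ACTIVE is set, so the result is 0, 1 or 3. */
    static unsigned sketchMonitorWaitIsActive(uint32_t fWait)
    {
        return fWait & (SKETCH_MWAIT_ACTIVE | ((fWait & SKETCH_MWAIT_ACTIVE) << 1));
    }

    int main(void)
    {
        printf("not armed            -> %u\n", sketchMonitorWaitIsActive(0));
        printf("active               -> %u\n", sketchMonitorWaitIsActive(SKETCH_MWAIT_ACTIVE));
        printf("active + breakirqif0 -> %u\n",
               sketchMonitorWaitIsActive(SKETCH_MWAIT_ACTIVE | SKETCH_MWAIT_BREAKIRQIF0));
        printf("breakirqif0 only     -> %u\n", sketchMonitorWaitIsActive(SKETCH_MWAIT_BREAKIRQIF0));
        return 0;
    }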
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r73097 r75646 2161 2161 * @thread EMT(idCpu). 2162 2162 */ 2163 GVMMR0DECL(int) GVMMR0SchedHalt(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t u64ExpireGipTime) 2164 { 2165 LogFlow(("GVMMR0SchedHalt: pGVM=%p pVM=%p idCpu=%#x u64ExpireGipTime=%#RX64\n", pGVM, pVM, idCpu, u64ExpireGipTime)); 2163 GVMMR0DECL(int) GVMMR0SchedHalt(PGVM pGVM, PVM pVM, PGVMCPU pCurGVCpu, uint64_t u64ExpireGipTime) 2164 { 2165 LogFlow(("GVMMR0SchedHalt: pGVM=%p pVM=%p pCurGVCpu=%p(%d) u64ExpireGipTime=%#RX64\n", 2166 pGVM, pVM, pCurGVCpu, pCurGVCpu->idCpu, u64ExpireGipTime)); 2166 2167 GVMM_CHECK_SMAP_SETUP(); 2167 2168 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 2168 2169 2169 /*2170 * Validate the VM structure, state and handle.2171 */2172 2170 PGVMM pGVMM; 2173 int rc = gvmmR0ByGVMandVMandEMT(pGVM, pVM, idCpu, &pGVMM); 2174 if (RT_FAILURE(rc)) 2175 return rc; 2171 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE); 2172 2176 2173 pGVM->gvmm.s.StatsSched.cHaltCalls++; 2177 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);2178 2179 PGVMCPU pCurGVCpu = &pGVM->aCpus[idCpu];2180 2174 Assert(!pCurGVCpu->gvmm.s.u64HaltExpire); 2181 2175 … … 2188 2182 if (fDoEarlyWakeUps) 2189 2183 { 2190 rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc);2184 int rc2 = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc2); 2191 2185 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 2192 2186 } … … 2212 2206 * Cap the sleep time to 1 second to be on the safe side. 2213 2207 */ 2208 int rc; 2214 2209 uint64_t cNsInterval = u64ExpireGipTime - u64NowGip; 2215 2210 if ( u64NowGip < u64ExpireGipTime … … 2259 2254 RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti); 2260 2255 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 2256 rc = VINF_SUCCESS; 2261 2257 } 2262 2258 2263 2259 return rc; 2264 2260 } 2261 2262 2263 /** 2264 * Halt the EMT thread. 2265 * 2266 * @returns VINF_SUCCESS normal wakeup (timeout or kicked by other thread). 2267 * VERR_INTERRUPTED if a signal was scheduled for the thread. 2268 * @param pGVM The global (ring-0) VM structure. 2269 * @param pVM The cross context VM structure. 2270 * @param idCpu The Virtual CPU ID of the calling EMT. 2271 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time. 2272 * @thread EMT(idCpu). 2273 */ 2274 GVMMR0DECL(int) GVMMR0SchedHaltReq(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t u64ExpireGipTime) 2275 { 2276 GVMM_CHECK_SMAP_SETUP(); 2277 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 2278 PGVMM pGVMM; 2279 int rc = gvmmR0ByGVMandVMandEMT(pGVM, pVM, idCpu, &pGVMM); 2280 if (RT_SUCCESS(rc)) 2281 { 2282 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 2283 rc = GVMMR0SchedHalt(pGVM, pVM, &pGVM->aCpus[idCpu], u64ExpireGipTime); 2284 } 2285 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 2286 return rc; 2287 } 2288 2265 2289 2266 2290 -
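In the GVMMR0.cpp change above, GVMMR0SchedHalt now takes the already-resolved ring-0 per-VCPU structure (PGVMCPU) instead of a CPU id, so ring-0 callers that have validated it can call straight in, while the new GVMMR0SchedHaltReq keeps the old validating idCpu-based signature for the VMMR0_DO_GVMM_SCHED_HALT request path. As a rough stand-alone illustration of that split (every name and type here is invented for the sketch, none of it is the real GVMM API):

    #include <stdio.h>

    /* Minimal stand-ins; the real types live in GVMMR0. */
    typedef struct SKETCHCPU { unsigned idCpu; } SKETCHCPU;
    typedef struct SKETCHVM  { SKETCHCPU aCpus[4]; unsigned cCpus; } SKETCHVM;

    #define SKETCH_OK               0
    #define SKETCH_ERR_INVALID_CPU  (-1)

    /* Inner worker: trusts the caller and takes the per-CPU structure directly
       (the shape GVMMR0SchedHalt now has). */
    static int sketchSchedHalt(SKETCHVM *pVM, SKETCHCPU *pCpu, unsigned long long uExpire)
    {
        (void)pVM;
        printf("halting CPU %u until %llu\n", pCpu->idCpu, uExpire);
        return SKETCH_OK;
    }

    /* Request wrapper: validates the CPU id first, then forwards
       (the shape of the new GVMMR0SchedHaltReq). */
    static int sketchSchedHaltReq(SKETCHVM *pVM, unsigned idCpu, unsigned long long uExpire)
    {
        if (idCpu >= pVM->cCpus)
            return SKETCH_ERR_INVALID_CPU;
        return sketchSchedHalt(pVM, &pVM->aCpus[idCpu], uExpire);
    }

    int main(void)
    {
        SKETCHVM Vm = { { {0}, {1}, {2}, {3} }, 4 };
        sketchSchedHaltReq(&Vm, 1, 123456789ULL);        /* request path (validated)        */
        sketchSchedHalt(&Vm, &Vm.aCpus[2], 987654321ULL); /* already-validated ring-0 caller */
        return 0;
    }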
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r75611 → r75646) Parenthesis cleanup only, in the check that delivers a pending APIC/PIC interrupt ("We can no longer re-request it from the APIC"):

    old:  else if (   VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                   && !pVCpu->hm.s.fSingleInstruction)

    new:  else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
                   && !pVCpu->hm.s.fSingleInstruction)
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r74785 r75646 60 60 #include <iprt/thread.h> 61 61 #include <iprt/timer.h> 62 #include <iprt/time.h> 62 63 63 64 #include "dtrace/VBoxVMM.h" … … 607 608 608 609 /** 610 * An interrupt or unhalt force flag is set, deal with it. 611 * 612 * @returns VINF_SUCCESS (or VINF_EM_HALT). 613 * @param pVCpu The cross context virtual CPU structure. 614 * @param uMWait Result from EMMonitorWaitIsActive(). 615 * @param enmInterruptibility Guest CPU interruptbility level. 616 */ 617 static int vmmR0DoHaltInterrupt(PVMCPU pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility) 618 { 619 Assert(!TRPMHasTrap(pVCpu)); 620 621 /* 622 * Pending interrupts w/o any SMIs or NMIs? That the usual case. 623 */ 624 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) 625 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI)) 626 { 627 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_INT_INHIBITED) 628 { 629 uint8_t u8Interrupt = 0; 630 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt); 631 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc)); 632 if (RT_SUCCESS(rc)) 633 { 634 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT); 635 636 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT); 637 AssertRCSuccess(rc); 638 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec); 639 return rc; 640 } 641 } 642 } 643 /* 644 * SMI is not implemented yet, at least not here. 645 */ 646 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI)) 647 { 648 return VINF_EM_HALT; 649 } 650 /* 651 * NMI. 652 */ 653 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) 654 { 655 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT) 656 { 657 /** @todo later. */ 658 return VINF_EM_HALT; 659 } 660 } 661 662 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT)) 663 { 664 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec); 665 return VINF_SUCCESS; 666 } 667 if (uMWait > 1) 668 { 669 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec); 670 return VINF_SUCCESS; 671 } 672 673 return VINF_EM_HALT; 674 } 675 676 677 /** 678 * This does one round of vmR3HaltGlobal1Halt(). 679 * 680 * The rational here is that we'll reduce latency in interrupt situations if we 681 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or 682 * MWAIT), but do one round of blocking here instead and hope the interrupt is 683 * raised in the meanwhile. 684 * 685 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the 686 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a 687 * ring-0 call (unless we're too close to a timer event). When the interrupt 688 * wakes us up, we'll return from ring-0 and EM will by instinct do a 689 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets 690 * back to VMMR0EntryFast(). 691 * 692 * @returns VINF_SUCCESS or VINF_EM_HALT. 693 * @param pGVM The ring-0 VM structure. 694 * @param pVM The cross context VM structure. 695 * @param pGVCpu The ring-0 virtual CPU structure. 696 * @param pVCpu The cross context virtual CPU structure. 697 * 698 * @todo r=bird: All the blocking/waiting and EMT managment should move out of 699 * the VM module, probably to VMM. Then this would be more weird wrt 700 * parameters and statistics. 701 */ 702 static int vmmR0DoHalt(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, PVMCPU pVCpu) 703 { 704 Assert(pVCpu == pGVCpu->pVCpu); 705 706 /* 707 * Do spin stat historization. 
708 */ 709 if (++pVCpu->vmm.s.cR0Halts & 0xff) 710 { /* likely */ } 711 else if (pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3) 712 { 713 pVCpu->vmm.s.cR0HaltsSucceeded = 2; 714 pVCpu->vmm.s.cR0HaltsToRing3 = 0; 715 } 716 else 717 { 718 pVCpu->vmm.s.cR0HaltsSucceeded = 0; 719 pVCpu->vmm.s.cR0HaltsToRing3 = 2; 720 } 721 722 /* 723 * Flags that makes us go to ring-3. 724 */ 725 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA 726 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE 727 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES 728 | VM_FF_PGM_NO_MEMORY | VM_FF_REM_HANDLER_NOTIFY | VM_FF_DEBUG_SUSPEND; 729 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM 730 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3 731 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL 732 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM | VMCPU_FF_INTERRUPT_NESTED_GUEST /*?*/ 733 | VMCPU_FF_VMX_PREEMPT_TIMER /*?*/ | VMCPU_FF_VMX_APIC_WRITE /*?*/ | VMCPU_FF_VMX_MTF /*?*/ 734 #ifdef VBOX_WITH_RAW_MODE 735 | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT 736 | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_CSAM_SCAN_PAGE | VMCPU_FF_CSAM_PENDING_ACTION 737 | VMCPU_FF_CPUM 738 #endif 739 ; 740 741 /* 742 * Check preconditions. 743 */ 744 unsigned const uMWait = EMMonitorWaitIsActive(pVCpu); 745 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pVCpu); 746 if ( pVCpu->vmm.s.fMayHaltInRing0 747 && !TRPMHasTrap(pVCpu) 748 && ( enmInterruptibility <= CPUMINTERRUPTIBILITY_INT_INHIBITED 749 || uMWait > 1)) 750 { 751 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs) 752 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs)) 753 { 754 /* 755 * Interrupts pending already? 756 */ 757 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC)) 758 APICUpdatePendingInterrupts(pVCpu); 759 760 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC 761 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)) 762 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility); 763 ASMNopPause(); 764 765 /* 766 * Check out how long till the next timer event. 767 */ 768 uint64_t u64Delta; 769 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta); 770 771 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs) 772 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs)) 773 { 774 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC)) 775 APICUpdatePendingInterrupts(pVCpu); 776 777 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC 778 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)) 779 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility); 780 781 /* 782 * Wait if there is enough time to the next timer event. 783 */ 784 if (u64Delta >= pVCpu->vmm.s.cNsSpinBlockThreshold) 785 { 786 /* If there are few other CPU cores around, we will procrastinate a 787 little before going to sleep, hoping for some device raising an 788 interrupt or similar. Though, the best thing here would be to 789 dynamically adjust the spin count according to its usfulness or 790 something... */ 791 if ( pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3 792 && RTMpGetOnlineCount() >= 4) 793 { 794 /** @todo Figure out how we can skip this if it haven't help recently... 
*/ 795 uint32_t cSpinLoops = 42; 796 while (cSpinLoops-- > 0) 797 { 798 ASMNopPause(); 799 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC)) 800 APICUpdatePendingInterrupts(pVCpu); 801 ASMNopPause(); 802 if (VM_FF_IS_ANY_SET(pVM, fVmFFs)) 803 { 804 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin); 805 return VINF_EM_HALT; 806 } 807 ASMNopPause(); 808 if (VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs)) 809 { 810 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin); 811 return VINF_EM_HALT; 812 } 813 ASMNopPause(); 814 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC 815 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)) 816 { 817 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromSpin); 818 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility); 819 } 820 ASMNopPause(); 821 } 822 } 823 824 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3 825 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */ 826 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED); 827 uint64_t const u64StartSchedHalt = RTTimeNanoTS(); 828 int rc = GVMMR0SchedHalt(pGVM, pVM, pGVCpu, u64GipTime); 829 uint64_t const u64EndSchedHalt = RTTimeNanoTS(); 830 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt; 831 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED); 832 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt); 833 if ( rc == VINF_SUCCESS 834 || rc == VERR_INTERRUPTED) 835 836 { 837 /* Keep some stats like ring-3 does. */ 838 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime; 839 if (cNsOverslept > 50000) 840 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept); 841 else if (cNsOverslept < -50000) 842 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt); 843 else 844 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt); 845 846 /* 847 * Recheck whether we can resume execution or have to go to ring-3. 848 */ 849 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs) 850 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs)) 851 { 852 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC)) 853 APICUpdatePendingInterrupts(pVCpu); 854 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC 855 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)) 856 { 857 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromBlock); 858 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility); 859 } 860 } 861 } 862 } 863 } 864 } 865 } 866 return VINF_EM_HALT; 867 } 868 869 870 /** 609 871 * VMM ring-0 thread-context callback. 610 872 * … … 1184 1446 case VMMR0_DO_HM_RUN: 1185 1447 { 1186 /* 1187 * Disable preemption. 1188 */ 1189 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu)); 1190 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 1191 RTThreadPreemptDisable(&PreemptState); 1192 1193 /* 1194 * Get the host CPU identifiers, make sure they are valid and that 1195 * we've got a TSC delta for the CPU. 
1196 */ 1197 RTCPUID idHostCpu; 1198 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu); 1199 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS 1200 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet))) 1448 for (;;) /* hlt loop */ 1201 1449 { 1202 pVCpu->iHostCpuSet = iHostCpuSet;1203 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);1204 1205 1450 /* 1206 * Update the periodic preemption timer if it's active.1451 * Disable preemption. 1207 1452 */ 1208 if (pVM->vmm.s.fUsePeriodicPreemptionTimers) 1209 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu)); 1210 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1453 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu)); 1454 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 1455 RTThreadPreemptDisable(&PreemptState); 1456 1457 /* 1458 * Get the host CPU identifiers, make sure they are valid and that 1459 * we've got a TSC delta for the CPU. 1460 */ 1461 RTCPUID idHostCpu; 1462 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu); 1463 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS 1464 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet))) 1465 { 1466 pVCpu->iHostCpuSet = iHostCpuSet; 1467 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu); 1468 1469 /* 1470 * Update the periodic preemption timer if it's active. 1471 */ 1472 if (pVM->vmm.s.fUsePeriodicPreemptionTimers) 1473 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu)); 1474 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1211 1475 1212 1476 #ifdef VMM_R0_TOUCH_FPU 1213 /*1214 * Make sure we've got the FPU state loaded so and we don't need to clear1215 * CR0.TS and get out of sync with the host kernel when loading the guest1216 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.1217 */1218 CPUMR0TouchHostFpu();1219 #endif1220 int rc;1221 bool fPreemptRestored = false;1222 if (!HMR0SuspendPending())1223 {1224 1477 /* 1225 * Enable the context switching hook. 1478 * Make sure we've got the FPU state loaded so and we don't need to clear 1479 * CR0.TS and get out of sync with the host kernel when loading the guest 1480 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}. 1226 1481 */ 1227 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK) 1482 CPUMR0TouchHostFpu(); 1483 #endif 1484 int rc; 1485 bool fPreemptRestored = false; 1486 if (!HMR0SuspendPending()) 1228 1487 { 1229 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook)); 1230 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2); 1488 /* 1489 * Enable the context switching hook. 1490 */ 1491 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK) 1492 { 1493 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook)); 1494 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2); 1495 } 1496 1497 /* 1498 * Enter HM context. 1499 */ 1500 rc = HMR0Enter(pVCpu); 1501 if (RT_SUCCESS(rc)) 1502 { 1503 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM); 1504 1505 /* 1506 * When preemption hooks are in place, enable preemption now that 1507 * we're in HM context. 1508 */ 1509 if (vmmR0ThreadCtxHookIsEnabled(pVCpu)) 1510 { 1511 fPreemptRestored = true; 1512 RTThreadPreemptRestore(&PreemptState); 1513 } 1514 1515 /* 1516 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode). 
1517 */ 1518 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1519 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); 1520 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1521 1522 /* 1523 * Assert sanity on the way out. Using manual assertions code here as normal 1524 * assertions are going to panic the host since we're outside the setjmp/longjmp zone. 1525 */ 1526 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM 1527 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST )) 1528 { 1529 pVM->vmm.s.szRing0AssertMsg1[0] = '\0'; 1530 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), 1531 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM); 1532 rc = VERR_VMM_WRONG_HM_VMCPU_STATE; 1533 } 1534 /** @todo Get rid of this. HM shouldn't disable the context hook. */ 1535 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu))) 1536 { 1537 pVM->vmm.s.szRing0AssertMsg1[0] = '\0'; 1538 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), 1539 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc); 1540 rc = VERR_INVALID_STATE; 1541 } 1542 1543 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED); 1544 } 1545 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC); 1546 1547 /* 1548 * Invalidate the host CPU identifiers before we disable the context 1549 * hook / restore preemption. 1550 */ 1551 pVCpu->iHostCpuSet = UINT32_MAX; 1552 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); 1553 1554 /* 1555 * Disable context hooks. Due to unresolved cleanup issues, we 1556 * cannot leave the hooks enabled when we return to ring-3. 1557 * 1558 * Note! At the moment HM may also have disabled the hook 1559 * when we get here, but the IPRT API handles that. 1560 */ 1561 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK) 1562 { 1563 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); 1564 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook); 1565 } 1231 1566 } 1232 1233 1567 /* 1234 * Enter HM context.1568 * The system is about to go into suspend mode; go back to ring 3. 1235 1569 */ 1236 rc = HMR0Enter(pVCpu); 1237 if (RT_SUCCESS(rc)) 1570 else 1238 1571 { 1239 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM); 1240 1241 /* 1242 * When preemption hooks are in place, enable preemption now that 1243 * we're in HM context. 1244 */ 1245 if (vmmR0ThreadCtxHookIsEnabled(pVCpu)) 1572 rc = VINF_EM_RAW_INTERRUPT; 1573 pVCpu->iHostCpuSet = UINT32_MAX; 1574 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); 1575 } 1576 1577 /** @todo When HM stops messing with the context hook state, we'll disable 1578 * preemption again before the RTThreadCtxHookDisable call. */ 1579 if (!fPreemptRestored) 1580 RTThreadPreemptRestore(&PreemptState); 1581 1582 pVCpu->vmm.s.iLastGZRc = rc; 1583 1584 /* Fire dtrace probe and collect statistics. */ 1585 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc); 1586 #ifdef VBOX_WITH_STATISTICS 1587 vmmR0RecordRC(pVM, pVCpu, rc); 1588 #endif 1589 1590 /* 1591 * If this is a halt. 
1592 */ 1593 if (rc != VINF_EM_HALT) 1594 { /* we're not in a hurry for a HLT, so prefer this path */ } 1595 else //if (VMCPU_FF_IS_ANY_SET()) 1596 { 1597 pVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu); 1598 if (rc == VINF_SUCCESS) 1246 1599 { 1247 fPreemptRestored = true;1248 RTThreadPreemptRestore(&PreemptState);1600 pVCpu->vmm.s.cR0HaltsSucceeded++; 1601 continue; 1249 1602 } 1250 1251 /* 1252 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode). 1253 */ 1254 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1255 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); 1256 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1257 1258 /* 1259 * Assert sanity on the way out. Using manual assertions code here as normal 1260 * assertions are going to panic the host since we're outside the setjmp/longjmp zone. 1261 */ 1262 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM 1263 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST )) 1264 { 1265 pVM->vmm.s.szRing0AssertMsg1[0] = '\0'; 1266 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), 1267 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM); 1268 rc = VERR_VMM_WRONG_HM_VMCPU_STATE; 1269 } 1270 /** @todo Get rid of this. HM shouldn't disable the context hook. */ 1271 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu))) 1272 { 1273 pVM->vmm.s.szRing0AssertMsg1[0] = '\0'; 1274 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), 1275 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc); 1276 rc = VERR_INVALID_STATE; 1277 } 1278 1279 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED); 1280 } 1281 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC); 1282 1283 /* 1284 * Invalidate the host CPU identifiers before we disable the context 1285 * hook / restore preemption. 1286 */ 1287 pVCpu->iHostCpuSet = UINT32_MAX; 1288 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); 1289 1290 /* 1291 * Disable context hooks. Due to unresolved cleanup issues, we 1292 * cannot leave the hooks enabled when we return to ring-3. 1293 * 1294 * Note! At the moment HM may also have disabled the hook 1295 * when we get here, but the IPRT API handles that. 1296 */ 1297 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK) 1298 { 1299 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); 1300 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook); 1603 pVCpu->vmm.s.cR0HaltsToRing3++; 1301 1604 } 1302 1605 } 1303 1606 /* 1304 * The system is about to go into suspend mode; go back to ring 3.1607 * Invalid CPU set index or TSC delta in need of measuring. 1305 1608 */ 1306 1609 else 1307 1610 { 1308 rc = VINF_EM_RAW_INTERRUPT;1309 1611 pVCpu->iHostCpuSet = UINT32_MAX; 1310 1612 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); 1613 RTThreadPreemptRestore(&PreemptState); 1614 if (iHostCpuSet < RTCPUSET_MAX_CPUS) 1615 { 1616 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/, 1617 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/, 1618 0 /*default cTries*/); 1619 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE) 1620 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3; 1621 else 1622 pVCpu->vmm.s.iLastGZRc = rc; 1623 } 1624 else 1625 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX; 1311 1626 } 1312 1313 /** @todo When HM stops messing with the context hook state, we'll disable 1314 * preemption again before the RTThreadCtxHookDisable call. 
*/ 1315 if (!fPreemptRestored) 1316 RTThreadPreemptRestore(&PreemptState); 1317 1318 pVCpu->vmm.s.iLastGZRc = rc; 1319 1320 /* Fire dtrace probe and collect statistics. */ 1321 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc); 1322 #ifdef VBOX_WITH_STATISTICS 1323 vmmR0RecordRC(pVM, pVCpu, rc); 1324 #endif 1325 } 1326 /* 1327 * Invalid CPU set index or TSC delta in need of measuring. 1328 */ 1329 else 1330 { 1331 pVCpu->iHostCpuSet = UINT32_MAX; 1332 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); 1333 RTThreadPreemptRestore(&PreemptState); 1334 if (iHostCpuSet < RTCPUSET_MAX_CPUS) 1335 { 1336 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/, 1337 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/, 1338 0 /*default cTries*/); 1339 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE) 1340 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3; 1341 else 1342 pVCpu->vmm.s.iLastGZRc = rc; 1343 } 1344 else 1345 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX; 1346 } 1627 break; 1628 1629 } /* halt loop. */ 1347 1630 break; 1348 1631 } … … 1541 1824 return VERR_INVALID_PARAMETER; 1542 1825 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1543 rc = GVMMR0SchedHalt (pGVM, pVM, idCpu, u64Arg);1826 rc = GVMMR0SchedHaltReq(pGVM, pVM, idCpu, u64Arg); 1544 1827 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1545 1828 break; -
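The VMMR0.cpp change above wraps the VMMR0_DO_HM_RUN case in a "hlt loop": when HMR0RunGuestCode returns VINF_EM_HALT, the new vmmR0DoHalt tries to resolve the halt in ring-0 (spin briefly, then block on GVMMR0SchedHalt) and, on success, simply continues the loop instead of returning to ring-3. The following stand-alone sketch only models that decision structure; all SKETCH* names are invented and the threshold and spin count are illustrative, not the real tuning.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in state; the real code checks VM/VMCPU force-action flags,
       EMMonitorWaitIsActive() and CPUMGetGuestInterruptibility() instead. */
    typedef struct SKETCHHALTSTATE
    {
        bool               fRing3WorkPending;     /* any "must go to ring-3" force flag     */
        bool               fInterruptPending;     /* APIC/PIC/NMI/SMI/unhalt pending        */
        unsigned long long cNsToTimer;            /* nanoseconds until the next timer event */
        unsigned long long cNsSpinBlockThreshold; /* below this, let ring-3 run the timers  */
    } SKETCHHALTSTATE;

    enum { SKETCH_RESUME_GUEST, SKETCH_GO_TO_RING3 };

    /* Simplified shape of vmmR0DoHalt(): try to resolve a HLT in ring-0 by spinning
       briefly and then blocking, falling back to ring-3 only when ring-3 work is
       pending or the next timer event is too close to bother blocking here. */
    static int sketchDoHalt(SKETCHHALTSTATE *pState)
    {
        if (pState->fRing3WorkPending)
            return SKETCH_GO_TO_RING3;
        if (pState->fInterruptPending)
            return SKETCH_RESUME_GUEST;             /* dispatch it and re-enter the guest */
        if (pState->cNsToTimer < pState->cNsSpinBlockThreshold)
            return SKETCH_GO_TO_RING3;              /* timer wheel needs running soon */

        for (unsigned cSpins = 0; cSpins < 42; cSpins++)  /* brief spin, hoping for an IRQ */
        {
            if (pState->fInterruptPending)
                return SKETCH_RESUME_GUEST;
        }

        /* The real code now blocks on GVMMR0SchedHalt() until the deadline or a
           wake-up, then re-checks the force flags before deciding. */
        return pState->fInterruptPending ? SKETCH_RESUME_GUEST : SKETCH_GO_TO_RING3;
    }

    int main(void)
    {
        SKETCHHALTSTATE State = { false, false, 2000000 /*2ms*/, 50000 /*50us*/ };
        printf("decision: %s\n",
               sketchDoHalt(&State) == SKETCH_RESUME_GUEST ? "resume guest" : "go to ring-3");
        return 0;
    }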
trunk/src/VBox/VMM/VMMR3/VMEmt.cpp
r74798 r75646 762 762 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime; 763 763 if (cNsOverslept > 50000) 764 STAM_ PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOverslept, cNsOverslept);764 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOverslept, cNsOverslept); 765 765 else if (cNsOverslept < -50000) 766 STAM_ PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockInsomnia, cNsElapsedSchedHalt);766 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockInsomnia, cNsElapsedSchedHalt); 767 767 else 768 STAM_ PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOnTime, cNsElapsedSchedHalt);768 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOnTime, cNsElapsedSchedHalt); 769 769 } 770 770 } … … 839 839 static DECLCALLBACK(void) vmR3HaltGlobal1NotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags) 840 840 { 841 if (pUVCpu->vm.s.fWait) 842 { 843 int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL); 844 AssertRC(rc); 845 } 846 else if ( (fFlags & VMNOTIFYFF_FLAGS_POKE) 847 || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM)) 848 { 849 PVMCPU pVCpu = pUVCpu->pVCpu; 850 if (pVCpu) 851 { 852 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); 841 /* 842 * With ring-0 halting, the fWait flag isn't set, so we have to check the 843 * CPU state to figure out whether to do a wakeup call. 844 */ 845 PVMCPU pVCpu = pUVCpu->pVCpu; 846 if (pVCpu) 847 { 848 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); 849 if (enmState == VMCPUSTATE_STARTED_HALTED || pUVCpu->vm.s.fWait) 850 { 851 int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL); 852 AssertRC(rc); 853 854 } 855 else if ( (fFlags & VMNOTIFYFF_FLAGS_POKE) 856 || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM)) 857 { 853 858 if (enmState == VMCPUSTATE_STARTED_EXEC) 854 859 { … … 870 875 #endif 871 876 } 877 } 878 /* This probably makes little sense: */ 879 else if (pUVCpu->vm.s.fWait) 880 { 881 int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL); 882 AssertRC(rc); 872 883 } 873 884 } … … 1021 1032 static const struct VMHALTMETHODDESC 1022 1033 { 1023 /** The halt method id. */ 1024 VMHALTMETHOD enmHaltMethod; 1034 /** The halt method ID. */ 1035 VMHALTMETHOD enmHaltMethod; 1036 /** Set if the method support halting directly in ring-0. */ 1037 bool fMayHaltInRing0; 1025 1038 /** The init function for loading config and initialize variables. */ 1026 DECLR3CALLBACKMEMBER(int, pfnInit,(PUVM pUVM));1039 DECLR3CALLBACKMEMBER(int, pfnInit,(PUVM pUVM)); 1027 1040 /** The term function. */ 1028 DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));1041 DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM)); 1029 1042 /** The VMR3WaitHaltedU function. */ 1030 DECLR3CALLBACKMEMBER(int, pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));1043 DECLR3CALLBACKMEMBER(int, pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)); 1031 1044 /** The VMR3WaitU function. */ 1032 DECLR3CALLBACKMEMBER(int, pfnWait,(PUVMCPU pUVCpu));1045 DECLR3CALLBACKMEMBER(int, pfnWait,(PUVMCPU pUVCpu)); 1033 1046 /** The VMR3NotifyCpuFFU function. */ 1034 DECLR3CALLBACKMEMBER(void, pfnNotifyCpuFF,(PUVMCPU pUVCpu, uint32_t fFlags));1047 DECLR3CALLBACKMEMBER(void, pfnNotifyCpuFF,(PUVMCPU pUVCpu, uint32_t fFlags)); 1035 1048 /** The VMR3NotifyGlobalFFU function. 
*/ 1036 DECLR3CALLBACKMEMBER(void, pfnNotifyGlobalFF,(PUVM pUVM, uint32_t fFlags));1049 DECLR3CALLBACKMEMBER(void, pfnNotifyGlobalFF,(PUVM pUVM, uint32_t fFlags)); 1037 1050 } g_aHaltMethods[] = 1038 1051 { 1039 { VMHALTMETHOD_BOOTSTRAP, NULL, NULL, NULL, vmR3BootstrapWait, vmR3BootstrapNotifyCpuFF, NULL },1040 { VMHALTMETHOD_OLD, NULL, NULL, vmR3HaltOldDoHalt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL },1041 { VMHALTMETHOD_1, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL },1042 { VMHALTMETHOD_GLOBAL_1, vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyCpuFF, NULL },1052 { VMHALTMETHOD_BOOTSTRAP, false, NULL, NULL, NULL, vmR3BootstrapWait, vmR3BootstrapNotifyCpuFF, NULL }, 1053 { VMHALTMETHOD_OLD, false, NULL, NULL, vmR3HaltOldDoHalt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL }, 1054 { VMHALTMETHOD_1, false, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL }, 1055 { VMHALTMETHOD_GLOBAL_1, true, vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyCpuFF, NULL }, 1043 1056 }; 1044 1057 … … 1057 1070 { 1058 1071 LogFlow(("VMR3NotifyGlobalFFU:\n")); 1059 uint32_t iHal dMethod = pUVM->vm.s.iHaltMethod;1060 1061 if (g_aHaltMethods[iHal dMethod].pfnNotifyGlobalFF) /** @todo make mandatory. */1062 g_aHaltMethods[iHal dMethod].pfnNotifyGlobalFF(pUVM, fFlags);1072 uint32_t iHaltMethod = pUVM->vm.s.iHaltMethod; 1073 1074 if (g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF) /** @todo make mandatory. */ 1075 g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF(pUVM, fFlags); 1063 1076 else 1064 1077 for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++) 1065 g_aHaltMethods[iHal dMethod].pfnNotifyCpuFF(&pUVM->aCpus[iCpu], fFlags);1078 g_aHaltMethods[iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[iCpu], fFlags); 1066 1079 } 1067 1080 … … 1292 1305 ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i); 1293 1306 1307 VMMR3SetMayHaltInRing0(pVCpu, g_aHaltMethods[i].fMayHaltInRing0, 1308 g_aHaltMethods[i].enmHaltMethod == VMHALTMETHOD_GLOBAL_1 1309 ? pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg : 0); 1310 1294 1311 return rc; 1295 1312 } -
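With ring-0 halting, a halted EMT sits in GVMMR0SchedHalt with the VMCPU state set to VMCPUSTATE_STARTED_HALTED and the ring-3 fWait flag clear, which is why vmR3HaltGlobal1NotifyCpuFF above now looks at the CPU state to decide whether a VMMR0_DO_GVMM_SCHED_WAKE_UP call is needed. A minimal stand-alone model of that check (all names invented for the sketch):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { SKETCH_STARTED, SKETCH_STARTED_HALTED, SKETCH_STARTED_EXEC } SKETCHCPUSTATE;

    /* With ring-0 halting the ring-3 fWait flag stays clear, so the notify path
       (cf. vmR3HaltGlobal1NotifyCpuFF) has to key the wake-up on the VMCPU state. */
    static bool sketchNeedsRing0WakeUp(SKETCHCPUSTATE enmState, bool fWait)
    {
        return enmState == SKETCH_STARTED_HALTED || fWait;
    }

    int main(void)
    {
        printf("halted in ring-0 : %d\n", sketchNeedsRing0WakeUp(SKETCH_STARTED_HALTED, false));
        printf("halted in ring-3 : %d\n", sketchNeedsRing0WakeUp(SKETCH_STARTED, true));
        printf("executing        : %d\n", sketchNeedsRing0WakeUp(SKETCH_STARTED_EXEC, false));
        return 0;
    }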
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r75316 r75646 585 585 } 586 586 #endif 587 for (VMCPUID i = 0; i < pVM->cCpus; i++) 588 { 589 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlock", i); 590 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOnTime", i); 591 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOverslept", i); 592 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockInsomnia", i); 593 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltExec, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec", i); 594 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltExecFromSpin, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromSpin", i); 595 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.StatR0HaltExecFromBlock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromBlock", i); 596 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.cR0Halts, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryCounter", i); 597 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.cR0HaltsSucceeded, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistorySucceeded", i); 598 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.cR0HaltsToRing3, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryToRing3", i); 599 } 587 600 } 588 601 … … 2487 2500 VERR_IPE_UNEXPECTED_INFO_STATUS); 2488 2501 return VBOXSTRICTRC_VAL(rcStrict); 2502 } 2503 2504 2505 /** 2506 * Interface for vmR3SetHaltMethodU. 2507 * 2508 * @param pVCpu The cross context virtual CPU structure of the 2509 * calling EMT. 2510 * @param fMayHaltInRing0 The new state. 2511 * @param cNsSpinBlockThreshold The spin-vs-blocking threashold. 2512 * @thread EMT(pVCpu) 2513 * 2514 * @todo Move the EMT handling to VMM (or EM). I soooooo regret that VM 2515 * component. 2516 */ 2517 VMMR3_INT_DECL(void) VMMR3SetMayHaltInRing0(PVMCPU pVCpu, bool fMayHaltInRing0, uint32_t cNsSpinBlockThreshold) 2518 { 2519 pVCpu->vmm.s.fMayHaltInRing0 = fMayHaltInRing0; 2520 pVCpu->vmm.s.cNsSpinBlockThreshold = cNsSpinBlockThreshold; 2489 2521 } 2490 2522 -
trunk/src/VBox/VMM/include/VMMInternal.h
(r73477 → r75646) VMMCPU grows the ring-0 halt state: the padding after fInRendezvous shrinks from [HC_ARCH_BITS == 32 ? 3+4 : 7+8] to [HC_ARCH_BITS == 32 ? 2 : 6+4] bytes, and the following members are added right after that group's closing marker, ahead of the raw-mode context tracing data:

    /** Whether we can HLT in VMMR0 rather than having to return to EM.
     * Updated by vmR3SetHaltMethodU(). */
    bool                        fMayHaltInRing0;
    /** The minimum delta for which we can HLT in ring-0 for.
     * The deadlines we can calculate are from TM, so, if it's too close
     * we should just return to ring-3 and run the timer wheel, no point
     * in spinning in ring-0.
     * Updated by vmR3SetHaltMethodU(). */
    uint32_t                    cNsSpinBlockThreshold;
    /** Number of ring-0 halts (used for depreciating following values). */
    uint32_t                    cR0Halts;
    /** Number of ring-0 halts succeeding (VINF_SUCCESS) recently. */
    uint32_t                    cR0HaltsSucceeded;
    /** Number of ring-0 halts failing (VINF_EM_HALT) recently. */
    uint32_t                    cR0HaltsToRing3;
    /** Padding */
    uint32_t                    u32Padding0;

New statistics members go in after CallRing3JmpBufR0, just before the closing "} VMMCPU;" and the unchanged AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8):

    STAMPROFILE                 StatR0HaltBlock;
    STAMPROFILE                 StatR0HaltBlockOnTime;
    STAMPROFILE                 StatR0HaltBlockOverslept;
    STAMPROFILE                 StatR0HaltBlockInsomnia;
    STAMCOUNTER                 StatR0HaltExec;
    STAMCOUNTER                 StatR0HaltExecFromBlock;
    STAMCOUNTER                 StatR0HaltExecFromSpin;
    STAMCOUNTER                 StatR0HaltToR3FromSpin;
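The cR0Halts/cR0HaltsSucceeded/cR0HaltsToRing3 counters feed the spin heuristic in vmmR0DoHalt: every 256 halts the recent-history counters are re-seeded in favour of whichever outcome dominated, so stale history cannot keep the spin loop enabled (or disabled) forever. A stand-alone sketch of that historization step (struct and function names invented; the logic mirrors the snippet in VMMR0.cpp above):

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for the three counters added to VMMCPU. */
    typedef struct SKETCHHALTHIST
    {
        uint32_t cHalts;        /* total ring-0 halts (drives the decay)  */
        uint32_t cSucceeded;    /* recent halts resolved in ring-0        */
        uint32_t cToRing3;      /* recent halts that had to go to ring-3  */
    } SKETCHHALTHIST;

    /* Mirrors the historization step at the top of vmmR0DoHalt(): only every
       256th call does anything, and then the counters are reset with a small
       bias towards the outcome that dominated. */
    static void sketchHistorize(SKETCHHALTHIST *pHist)
    {
        if (++pHist->cHalts & 0xff)
            return;                               /* not the 256th call yet */
        if (pHist->cSucceeded > pHist->cToRing3)
        {
            pHist->cSucceeded = 2;
            pHist->cToRing3   = 0;
        }
        else
        {
            pHist->cSucceeded = 0;
            pHist->cToRing3   = 2;
        }
    }

    int main(void)
    {
        SKETCHHALTHIST Hist = { 0, 0, 0 };
        for (unsigned i = 0; i < 512; i++)
        {
            sketchHistorize(&Hist);
            Hist.cSucceeded++;                    /* pretend every halt succeeds */
        }
        printf("after 512 halts: succeeded=%u toRing3=%u\n", Hist.cSucceeded, Hist.cToRing3);
        return 0;
    }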
trunk/src/VBox/VMM/include/VMMInternal.mac
(r73477 → r75646) Keeps the assembly mirror of VMMCPU in sync with the header change: the padding after .fInRendezvous shrinks (resb 3+4 becomes resb 2 for 32-bit hosts, resb 7+8 becomes resb 6+4 for 64-bit hosts) and the new fields are inserted before the "alignb 8":

    .fInRendezvous          resb 1
    %if HC_ARCH_BITS == 32
    .afPadding              resb 2
    %else
    .afPadding              resb 6+4
    %endif
    .fMayHaltInRing0        resb 1
    .cNsSpinBlockThreshold  resd 1
    .cR0Halts               resd 1
    .cR0HaltsSucceeded      resd 1
    .cR0HaltsToRing3        resd 1

    alignb 8
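The .mac description has to stay in byte-for-byte sync with the C structure, which is why the padding and the new fields change in lockstep with VMMInternal.h. The sketch below shows the kind of compile-time offset check (C11) that catches such a mismatch; the struct is a stand-in and the asserted offset applies only to this sketch, not to the real VMMCPU layout.

    #include <stddef.h>
    #include <stdint.h>
    #include <assert.h>

    /* Illustrative C struct standing in for the tail of VMMCPU; field names match
       the changeset, but the offsets and padding here are NOT the real ones. */
    typedef struct SKETCHVMMCPUTAIL
    {
        _Bool    fInRendezvous;
        _Bool    afPadding[6 + 4];
        _Bool    fMayHaltInRing0;
        uint32_t cNsSpinBlockThreshold;
        uint32_t cR0Halts;
        uint32_t cR0HaltsSucceeded;
        uint32_t cR0HaltsToRing3;
    } SKETCHVMMCPUTAIL;

    /* Compile-time checks of the kind that catch a .h/.mac drift: resize the
       padding in only one place and the build breaks here instead of at runtime. */
    static_assert(offsetof(SKETCHVMMCPUTAIL, fMayHaltInRing0) == 11, "asm offset out of sync");
    static_assert(sizeof(uint32_t) == 4, "resd 1 must match uint32_t");

    int main(void) { return 0; }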