- Timestamp: Aug 28, 2013 5:14:38 PM
- svn:sync-xref-src-repo-rev: 88510
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r48044 → r48130

- SVMTRANSIENT: the saved host rflags/eflags field is renamed from uEFlags to uEflags; all readers and writers (the ASMIntDisableFlags()/ASMSetFlags() pairs) are updated accordingly.

- The old hmR0SvmInjectPendingEvent() is split into two functions:
  - hmR0SvmEvaluatePendingEvent(): "Evaluates the event to be delivered to the guest and sets it as the pending event." It asserts !pVCpu->hm.s.Event.fPending, handles NMIs first (SMIs remain a @todo since they take priority over NMIs), then external PIC/APIC interrupts, and records the chosen event with hmR0SvmSetPendingEvent() instead of writing it into the VMCB. A new comment documents why external interrupts are decided here: once PDMGetInterrupt() has been called the interrupt must be delivered ASAP and no guest code may run before it is injected, so the decision is made solely from the force-flags. If delivery is blocked (interrupt shadow or EFLAGS.IF clear), hmR0SvmSetVirtIntrIntercept() is armed as before.
  - hmR0SvmInjectPendingEvent(): now only injects an already-pending HM event via hmR0SvmInjectEventVmcb(). It asserts !TRPMHasTrap() and !VMMRZCallRing3IsEnabled(), and under VBOX_STRICT/VBOX_WITH_STATISTICS asserts that an external interrupt is not blocked by EFLAGS.IF or an interrupt shadow (and an NMI not by an interrupt shadow). It then clears fPending and bumps StatInjectInterrupt/StatInjectXcpt.

- hmR0SvmPreRunGuest(): converts a pending TRPM trap with hmR0SvmTrpmTrapToPendingEvent(), otherwise calls hmR0SvmEvaluatePendingEvent(), before interrupts are disabled. Under VBOX_WITH_VMMR0_DISABLE_PREEMPTION, after ASMIntDisableFlags() the code now also re-checks VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC and VMCPU_FF_HM_TO_R3_MASK (force-flags that could have been altered in the meantime, e.g. by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}) and returns VINF_EM_RAW_TO_R3 (StatSwitchHmToR3FF) as a last opportunity to get the EMT back to ring-3; a pending host preemption still returns VINF_EM_RAW_INTERRUPT (StatPendingHostIrq). The old "convert TRPM traps and inject pending event" block at the end of this function is removed, and a new comment marks the "no more longjmps or returns to ring-3" point.

- hmR0SvmPreRunGuestCommitted(): hmR0SvmInjectPendingEvent() is now called from here, after VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); a "@todo get rid of this" is added next to the redundant ASMIntDisableFlags() used when VBOX_WITH_VMMR0_DISABLE_PREEMPTION is not defined.
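The changeset's central idea on the SVM side (and the VMX side below) is a two-phase flow: evaluate an event early, while returns to ring-3 are still possible, and inject it only after the point of no return. The following is a self-contained sketch with made-up types and names, not the VirtualBox API; it only illustrates the ordering, not the real VMCB/VMCS programming.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, heavily simplified per-VCPU state. */
typedef struct VCPUSTATE
{
    bool fEventPending;   /* an event has been evaluated and recorded */
    int  iEventVector;    /* which vector to deliver */
    bool fNmiPending;     /* force-flag: NMI waiting */
    bool fIntrPending;    /* force-flag: external interrupt waiting */
    bool fIntShadow;      /* interrupt shadow (STI / MOV SS) */
    bool fIfSet;          /* guest EFLAGS.IF */
} VCPUSTATE;

/* Phase 1: may still bail out to ring-3 afterwards; only records a decision. */
static void evaluatePendingEvent(VCPUSTATE *pVCpu)
{
    if (pVCpu->fEventPending)
        return;                                /* keep the already-recorded event */
    if (pVCpu->fNmiPending && !pVCpu->fIntShadow)
    {
        pVCpu->iEventVector  = 2;              /* NMI */
        pVCpu->fNmiPending   = false;
        pVCpu->fEventPending = true;
    }
    else if (pVCpu->fIntrPending && pVCpu->fIfSet && !pVCpu->fIntShadow)
    {
        /* In the real code this is where PDMGetInterrupt() is called: once the
           vector is fetched it must be delivered, hence the early evaluation. */
        pVCpu->iEventVector  = 0x30;           /* example external vector */
        pVCpu->fIntrPending  = false;
        pVCpu->fEventPending = true;
    }
}

/* Phase 2: after the point of no return; only performs the delivery. */
static void injectPendingEvent(VCPUSTATE *pVCpu)
{
    if (pVCpu->fEventPending)
    {
        printf("injecting vector %#x\n", (unsigned)pVCpu->iEventVector);
        pVCpu->fEventPending = false;
    }
}

int main(void)
{
    VCPUSTATE Cpu = { false, 0, false, true, false, true };
    evaluatePendingEvent(&Cpu);   /* decide what to deliver */
    injectPendingEvent(&Cpu);     /* deliver it right before entering the guest */
    return 0;
}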
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r48082 → r48130

- The old hmR0VmxInjectPendingEvent() is likewise split:
  - hmR0VmxEvaluatePendingEvent(): "Evaluates the event to be delivered to the guest and sets it as the pending event." It asserts !pVCpu->hm.s.Event.fPending, reads the guest interruptibility-state, handles NMIs first (SMIs remain a @todo; on some CPUs block-by-STI also blocks NMIs, see Intel spec. 26.3.1.5 "Checks On Guest Non-Register State"), then external PIC/APIC interrupts, and records the event with hmR0VmxSetPendingEvent() instead of calling hmR0VmxInjectEventVmcs(). The same new comment as on the SVM side explains that once PDMGetInterrupt() is called the interrupt must be delivered ASAP, so the decision is made here purely from the force-flags; if delivery is blocked, hmR0VmxSetIntWindowExitVmcs() is armed.
  - hmR0VmxInjectPendingEvent(): now only injects the pending HM event via hmR0VmxInjectEventVmcs() (no-long-jump zone). Under VBOX_STRICT/VBOX_WITH_STATISTICS it asserts that an external interrupt is not blocked by EFLAGS.IF or the STI/MOV SS shadows (and an NMI not by STI/MOV SS). It clears fPending, re-reads the interruptibility-state (hmR0VmxInjectEventVmcs() may change it, e.g. real-on-v86 guests injecting software interrupts), bumps StatInjectInterrupt/StatInjectXcpt, and finally delivers a pending debug exception if the guest is single-stepping.

- hmR0VmxPreRunGuest(): fixes the "herebefore" typo in the thread-context-hook comment; converts a pending TRPM trap with hmR0VmxTrpmTrapToPendingEvent(), otherwise calls hmR0VmxEvaluatePendingEvent(), before interrupts are disabled (toggling force-flags here is safe as long as TRPM is updated on premature exits to ring-3, and the force-flags must not be restored). Under VBOX_WITH_VMMR0_DISABLE_PREEMPTION, after ASMIntDisableFlags() it now re-checks VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC and VMCPU_FF_HM_TO_R3_MASK and returns VINF_EM_RAW_TO_R3 (StatSwitchHmToR3FF); a pending host preemption still returns VINF_EM_RAW_INTERRUPT. hmR0VmxInjectPendingEvent() is called at the end of this function rather than in hmR0VmxPreRunGuestCommitted(), because event injection can triple-fault the VM in the real-on-v86 case and hmR0VmxPreRunGuestCommitted() does not expect failures; the old @todo about reworking event evaluation/injection is dropped.

- hmR0VmxPreRunGuestCommitted(): adds "@todo get rid of this" for the redundant ASMIntDisableFlags(), and fixes comment typos ("notused" → "-not- used", "so emother" → "some other", re-wrapped lines).
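Both backends now share the same last-chance pattern before committing to guest execution: disable host interrupts, re-test the force-flags that might have been raised in the meantime, and only then declare the point of no return. Below is a self-contained sketch of that control flow with stand-in predicates and status codes (the real code uses ASMIntDisableFlags(), VM_FF_IS_PENDING()/VMCPU_FF_IS_PENDING(), RTThreadPreemptIsPending() and VINF_EM_RAW_TO_R3/VINF_EM_RAW_INTERRUPT).

#include <stdbool.h>
#include <stdio.h>

enum { STATUS_OK = 0, STATUS_TO_RING3, STATUS_HOST_INTERRUPT };

/* Stand-ins for the real primitives; trivial bodies so the sketch compiles. */
static unsigned long disableHostInterrupts(void)            { return 0x200; }
static void          restoreHostInterrupts(unsigned long f) { (void)f; }
static bool          ring3ForceFlagsPending(void)           { return false; }
static bool          hostPreemptionPending(void)            { return false; }

static int preRunFinalChecks(unsigned long *pfSavedFlags)
{
    /* Close the window for new notifications first... */
    *pfSavedFlags = disableHostInterrupts();

    /* ...then re-check anything that may have been raised meanwhile,
       e.g. while PDMGetInterrupt() left its critical section. */
    if (ring3ForceFlagsPending())
    {
        restoreHostInterrupts(*pfSavedFlags);
        return STATUS_TO_RING3;         /* last chance to get the EMT back to ring-3 */
    }
    if (hostPreemptionPending())
    {
        restoreHostInterrupts(*pfSavedFlags);
        return STATUS_HOST_INTERRUPT;   /* let the host run first */
    }
    return STATUS_OK;                   /* committed: guest code runs next */
}

int main(void)
{
    unsigned long fSaved = 0;
    printf("preRunFinalChecks -> %d\n", preRunFinalChecks(&fSaved));
    return 0;
}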
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r48083 → r48130

- VMMR0ThreadCtxHooksCreate(): the "#if 0 /* Not stable yet. */" guard around RTThreadCtxHooksCreate() becomes "#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)", i.e. ring-0 thread-context hooks are now enabled on Linux and Solaris hosts.

- The open-coded preemption handling around updating the VCPU <-> host CPU mapping (a local RTTHREADPREEMPTSTATE initialized with RTTHREADPREEMPTSTATE_INITIALIZER, a fPreemptDisabled flag, and explicit RTThreadPreemptIsEnabled()/RTThreadPreemptDisable()/RTThreadPreemptRestore() calls) is replaced by the HM_DISABLE_PREEMPT_IF_NEEDED() / HM_RESTORE_PREEMPT_IF_NEEDED() macro pair.
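The macro pair keeps the usual shape of a scoped disable/restore: declare the saved state in the enclosing scope, disable only when preemption is currently enabled, and restore exactly what was saved. The following is a standalone imitation with dummy primitives, not the IPRT RTThreadPreempt* API; only the structure mirrors the macros shown in the HMInternal.h hunk below.

#include <stdbool.h>
#include <stdio.h>

/* Dummy preemption primitives so the example is self-contained. */
typedef int PREEMPTSTATE;
#define PREEMPTSTATE_INITIALIZER 0

static bool g_fPreemptEnabled = true;

static bool preemptIsEnabled(void)               { return g_fPreemptEnabled; }
static void preemptDisable(PREEMPTSTATE *pState) { *pState = 1; g_fPreemptEnabled = false; }
static void preemptRestore(PREEMPTSTATE *pState) { (void)pState; g_fPreemptEnabled = true; }

/* Same shape as HM_DISABLE_PREEMPT_IF_NEEDED()/HM_RESTORE_PREEMPT_IF_NEEDED(). */
#define DISABLE_PREEMPT_IF_NEEDED() \
    PREEMPTSTATE PreemptStateInternal = PREEMPTSTATE_INITIALIZER; \
    bool fPreemptDisabledInternal = false; \
    if (preemptIsEnabled()) \
    { \
        preemptDisable(&PreemptStateInternal); \
        fPreemptDisabledInternal = true; \
    }

#define RESTORE_PREEMPT_IF_NEEDED() \
    do \
    { \
        if (fPreemptDisabledInternal) \
            preemptRestore(&PreemptStateInternal); \
    } while (0)

static void updatePerCpuMapping(void)
{
    DISABLE_PREEMPT_IF_NEEDED();
    /* ...work that must stay on the current host CPU... */
    printf("preemption enabled while working: %d\n", g_fPreemptEnabled);
    RESTORE_PREEMPT_IF_NEEDED();
}

int main(void)
{
    updatePerCpuMapping();
    return 0;
}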
trunk/src/VBox/VMM/VMMR3/EM.cpp
r47808 → r48130

- In the halted-state handling (around line 2529), a new case is inserted before the call to VMR3WaitHalted(): if TRPMHasTrap(pVCpu) the EMT returns VINF_EM_RESCHEDULE instead of blocking, so a trap that is already queued in TRPM gets delivered rather than the VCPU halting on top of it.
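The intent of the EM.cpp hunk is simply "never halt while an event is already queued". Reduced to a hypothetical dispatch helper (names invented for illustration, not the EM API):

#include <stdbool.h>
#include <stdio.h>

enum HaltAction { ACTION_RESCHEDULE, ACTION_WAIT_HALTED };

/* Mirrors the order of checks in the changed halt path: a pending interrupt
   or an already-queued trap forces a reschedule; only otherwise do we block. */
static enum HaltAction decideHaltAction(bool fInterruptPending, bool fTrapPending)
{
    if (fInterruptPending || fTrapPending)
        return ACTION_RESCHEDULE;
    return ACTION_WAIT_HALTED;
}

int main(void)
{
    printf("%d\n", decideHaltAction(false, true));   /* trap queued -> reschedule */
    return 0;
}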
trunk/src/VBox/VMM/include/HMInternal.h
r48044 → r48130

- The HM_DISABLE_PREEMPT_IF_NEEDED() and HM_RESTORE_PREEMPT_IF_NEEDED() macro definitions are removed from this spot (after the HM_CHANGED_GUEST_DEBUG group, old lines ~139-157). HM_DISABLE_PREEMPT_IF_NEEDED() declared a local RTTHREADPREEMPTSTATE initialized with RTTHREADPREEMPTSTATE_INITIALIZER plus a bool flag, asserted VMMR0ThreadCtxHooksAreRegistered(pVCpu), and disabled preemption only if RTThreadPreemptIsEnabled(NIL_RTTHREAD); HM_RESTORE_PREEMPT_IF_NEEDED() restored it via RTThreadPreemptRestore() when the flag was set. The following "Maximum number of page flushes we are willing to remember before considering a full TLB flush" comment is unchanged.