Changeset 65251 in vbox for trunk/src/VBox/VMM
Timestamp: Jan 12, 2017 7:30:01 AM
File: trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (1 edited)
Legend:
- Lines prefixed '-' were removed (present in r65241 only)
- Lines prefixed '+' were added (present in r65251 only)
- Unprefixed lines are unmodified context
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r65241)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r65251)

@@ -3404,5 +3404,5 @@
  * before using them.
  *
- * @remarks Can cause longjumps!!!
+ * @remarks No-long-jump zone!!!
  */
 DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)

@@ -8373,6 +8373,3 @@
     HMVMX_ASSERT_PREEMPT_SAFE();

-    VMMRZCallRing3Disable(pVCpu);
-    Assert(VMMR0IsLogFlushDisabled(pVCpu));
-
     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));

@@ -8442,6 +8439,4 @@
     HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);

-    VMMRZCallRing3Enable(pVCpu);
-
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;

@@ -8524,4 +8519,6 @@
 {
     HMVMX_ASSERT_PREEMPT_SAFE();
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));

     Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));

@@ -8530,4 +8527,8 @@
 #endif

+    /*
+     * RIP is what changes the most often and hence if it's the only bit needing to be
+     * updated, we shall handle it early for performance reasons.
+     */
     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
     if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))

@@ -8626,12 +8627,9 @@
         && PDMHasApic(pVM))
     {
-        uint64_t u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
+        uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
         Assert(u64MsrApicBase);
         Assert(pVM->hm.s.vmx.HCPhysApicAccess);

-        /* We only care about the APIC base MSR address and not the other bits. */
-        RTGCPHYS GCPhysApicBase;
-        GCPhysApicBase  = u64MsrApicBase;
-        GCPhysApicBase &= PAGE_BASE_GC_MASK;
+        RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;

         /* Unalias any existing mapping. */

@@ -8641,4 +8639,4 @@
         /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
-        LogRel(("HM: VCPU%u: Mapped HC APIC-access page GCPhysApicBase=%#RGp\n", pVCpu->idCpu, GCPhysApicBase));
+        LogRel(("hmR0VmxPreRunGuest: VCPU%u: Mapped HC APIC-access page at %#RGp\n", pVCpu->idCpu, GCPhysApicBase));
         rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
         AssertRCReturn(rc, rc);

@@ -8668,22 +8666,29 @@

     /*
-     * Load the guest state bits, we can handle longjmps/getting preempted here.
+     * No longjmps to ring-3 from this point on!!!
+     * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
+     * This also disables flushing of the R0-logger instance (if any).
+     */
+    VMMRZCallRing3Disable(pVCpu);
+
+    /*
+     * Load the guest state bits.
+     *
+     * We cannot perform longjmps while loading the guest state because we do not preserve the
+     * host/guest state (although the VMCS will be preserved) across longjmps which can cause
+     * CPU migration.
      *
      * If we are injecting events to a real-on-v86 mode guest, we will have to update
      * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
-     * Hence, this needs to be done -after- injection of events.
+     * Hence, loading of the guest state needs to be done -after- injection of events.
      */
     rcStrict = hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
     if (rcStrict == VINF_SUCCESS)
     { /* likely */ }
     else
+    {
+        VMMRZCallRing3Enable(pVCpu);
         return rcStrict;
-
-    /*
-     * No longjmps to ring-3 from this point on!!!
-     * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
-     * This also disables flushing of the R0-logger instance (if any).
-     */
-    VMMRZCallRing3Disable(pVCpu);
+    }

     /*
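
A note on the comment added before the HMCPU_CF_IS_SET_ONLY() check: the HM_CHANGED_GUEST_* flags form a dirty-state bitmask, and the check gates a fast path that writes back only RIP when nothing else changed. A minimal, self-contained model of that flag test; the flag values and isSetOnly() below are illustrative stand-ins, not the real definitions from HMInternal.h:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative dirty-state flags; the real HM_CHANGED_GUEST_* values and
     * the HMCPU_CF_IS_SET_ONLY() macro live in VirtualBox's HMInternal.h. */
    #define CHANGED_GUEST_RIP   UINT32_C(0x00000001)
    #define CHANGED_GUEST_RSP   UINT32_C(0x00000002)

    /* True when fFlag is set and no other dirty bit is set. */
    static int isSetOnly(uint32_t fChanged, uint32_t fFlag)
    {
        return (fChanged & fFlag) != 0 && (fChanged & ~fFlag) == 0;
    }

    int main(void)
    {
        uint32_t fChanged = CHANGED_GUEST_RIP;
        if (isSetOnly(fChanged, CHANGED_GUEST_RIP))
            printf("fast path: only RIP is dirty, write back just that field\n");

        fChanged |= CHANGED_GUEST_RSP;
        if (!isSetOnly(fChanged, CHANGED_GUEST_RIP))
            printf("slow path: more state is dirty, do the full load\n");
        return 0;
    }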
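
The collapsed GCPhysApicBase computation relies on the layout of the IA32_APIC_BASE MSR: the low 12 bits carry flag bits (BSP, x2APIC enable, global enable), not address bits, so masking them off leaves the page-aligned physical address of the APIC page. A self-contained sketch of the same masking; PAGE_BASE_MASK below is a stand-in for VirtualBox's PAGE_BASE_GC_MASK, assuming 4 KiB pages:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Stand-in for PAGE_BASE_GC_MASK: clears the low 12 bits (4 KiB pages). */
    #define PAGE_BASE_MASK  (~(uint64_t)0xfff)

    int main(void)
    {
        /* Example IA32_APIC_BASE value: base 0xfee00000 with the BSP flag
         * (bit 8) and the APIC global-enable flag (bit 11) set. */
        uint64_t const u64MsrApicBase = UINT64_C(0xfee00000) | (1u << 8) | (1u << 11);
        uint64_t const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_MASK;

        printf("IA32_APIC_BASE=%#" PRIx64 " -> APIC base page=%#" PRIx64 "\n",
               u64MsrApicBase, GCPhysApicBase);  /* 0xfee00900 -> 0xfee00000 */
        return 0;
    }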
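
The largest change hoists VMMRZCallRing3Disable() in hmR0VmxPreRunGuest() to before the guest-state load instead of after it, so the load runs entirely inside the no-long-jump zone; in exchange, the failure path after hmR0VmxLoadGuestStateOptimal() must now re-enable ring-3 calls before bailing out, which is what the new { VMMRZCallRing3Enable(pVCpu); return rcStrict; } block does. A self-contained sketch of this disable/re-enable bracketing; all names below are toy stand-ins, not VirtualBox APIs:

    #include <stdio.h>

    /* Toy stand-in for the per-VCPU ring-3-call state that the real
     * VMMRZCallRing3Disable()/VMMRZCallRing3Enable() pair manages. */
    static int g_fRing3CallsEnabled = 1;

    static void callRing3Disable(void) { g_fRing3CallsEnabled = 0; }
    static void callRing3Enable(void)  { g_fRing3CallsEnabled = 1; }

    /* Hypothetical fallible step, standing in for the guest-state load. */
    static int loadGuestState(int fFail) { return fFail ? -1 : 0; }

    /* The bracketing discipline: disable before the fallible step, and make
     * every early-return path restore the state before leaving. */
    static int preRunGuest(int fFail)
    {
        callRing3Disable();                 /* enter the no-long-jump zone */

        int rc = loadGuestState(fFail);
        if (rc != 0)
        {
            callRing3Enable();              /* error path must re-enable   */
            return rc;
        }

        /* In the real code the guest runs here and the matching enable
         * happens only after VM-exit handling; simplified for the sketch. */
        callRing3Enable();
        return 0;
    }

    int main(void)
    {
        int rc = preRunGuest(0);
        printf("success path: rc=%d, ring-3 calls enabled=%d\n", rc, g_fRing3CallsEnabled);
        rc = preRunGuest(1);
        printf("failure path: rc=%d, ring-3 calls enabled=%d\n", rc, g_fRing3CallsEnabled);
        return 0;
    }

The invariant the pattern maintains is that every exit path out of the bracketed region restores the ring-3-call state exactly once.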