Changeset 65241 in vbox for trunk/src/VBox/VMM
Timestamp:
    Jan 11, 2017 12:43:56 PM (8 years ago)
Location:
    trunk/src/VBox/VMM
Files:
    2 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r65233 -> r65241)

@@ -3448,53 +3448,4 @@
             AssertRCReturn(rc, rc);
         }
-
-#ifndef IEM_VERIFICATION_MODE_FULL
-        /*
-         * Setup the virtualized-APIC accesses.
-         *
-         * Note! This can cause a longjumps to R3 due to the acquisition of the PGM lock
-         *       in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}.
-         */
-        if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
-        {
-            uint64_t u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
-            if (u64MsrApicBase != pVCpu->hm.s.vmx.u64MsrApicBase)
-            {
-                /* We only care about the APIC base MSR address and not the other bits. */
-                PVM pVM = pVCpu->CTX_SUFF(pVM);
-                Assert(pVM->hm.s.vmx.HCPhysApicAccess);
-                RTGCPHYS GCPhysApicBase;
-                GCPhysApicBase  = u64MsrApicBase;
-                GCPhysApicBase &= PAGE_BASE_GC_MASK;
-
-                /*
-                 * We only need a single HC page as the APIC-access page for all VCPUs as it's used
-                 * purely for causing VM-exits and not for data access within the actual page.
-                 *
-                 * The following check ensures we do the mapping on a per-VM basis as our APIC code
-                 * does not allow different APICs to be mapped at different addresses on different VCPUs.
-                 *
-                 * In fact, we do not support remapping of the APIC base at all, see APICSetBaseMsr()
-                 * so we just map this once per-VM.
-                 */
-                if (ASMAtomicCmpXchgU64(&pVM->hm.s.vmx.GCPhysApicBase, GCPhysApicBase, 0 /* u64Old */))
-                {
-                    /* Unalias any existing mapping. */
-                    rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
-                    AssertRCReturn(rc, rc);
-
-                    /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables
-                       if necessary. */
-                    Log4(("HM: VCPU%u: Mapped HC APIC-access page GCPhysApicBase=%#RGp\n", pVCpu->idCpu, GCPhysApicBase));
-                    rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess,
-                                              X86_PTE_RW | X86_PTE_P);
-                    AssertRCReturn(rc, rc);
-                }
-
-                /* Update the per-VCPU cache of the APIC base MSR. */
-                pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
-            }
-        }
-#endif /* !IEM_VERIFICATION_MODE_FULL */
     }
     HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);

@@ -8412,4 +8363,6 @@
  *          out-of-sync. Make sure to update the required fields
  *          before using them.
+ *
+ * @remarks No-long-jump zone!!!
  */
 static VBOXSTRICTRC hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)

@@ -8419,4 +8372,7 @@
     AssertPtr(pMixedCtx);
     HMVMX_ASSERT_PREEMPT_SAFE();
+
+    VMMRZCallRing3Disable(pVCpu);
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));

@@ -8486,4 +8442,6 @@
     HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
 
+    VMMRZCallRing3Enable(pVCpu);
+
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;

@@ -8560,4 +8518,6 @@
  *          out-of-sync. Make sure to update the required fields
  *          before using them.
+ *
+ * @remarks No-long-jump zone!!!
  */
 static VBOXSTRICTRC hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)

@@ -8652,4 +8612,40 @@
     else
         return rcStrict;
+
+#ifndef IEM_VERIFICATION_MODE_FULL
+    /*
+     * Setup the virtualized-APIC accesses.
+     *
+     * Note! This can cause a longjumps to R3 due to the acquisition of the PGM lock
+     *       in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}.
+     *
+     * This is the reason we do it here and not in hmR0VmxLoadGuestState().
+     */
+    if (   !pVCpu->hm.s.vmx.u64MsrApicBase
+        && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
+        && PDMHasApic(pVM))
+    {
+        uint64_t u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
+        Assert(u64MsrApicBase);
+        Assert(pVM->hm.s.vmx.HCPhysApicAccess);
+
+        /* We only care about the APIC base MSR address and not the other bits. */
+        RTGCPHYS GCPhysApicBase;
+        GCPhysApicBase  = u64MsrApicBase;
+        GCPhysApicBase &= PAGE_BASE_GC_MASK;
+
+        /* Unalias any existing mapping. */
+        int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
+        AssertRCReturn(rc, rc);
+
+        /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
+        LogRel(("HM: VCPU%u: Mapped HC APIC-access page GCPhysApicBase=%#RGp\n", pVCpu->idCpu, GCPhysApicBase));
+        rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
+        AssertRCReturn(rc, rc);
+
+        /* Update the per-VCPU cache of the APIC base MSR. */
+        pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
+    }
+#endif /* !IEM_VERIFICATION_MODE_FULL */
 
     if (TRPMHasTrap(pVCpu))
trunk/src/VBox/VMM/include/HMInternal.h (r65222 -> r65241)

@@ -461,7 +461,4 @@
     /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
     R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
-
-    /** The guest's MSR APIC base address at which the APIC access page is mapped. */
-    RTGCPHYS volatile           GCPhysApicBase;
 
     /** Physical address of the APIC-access page. */
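The deleted GCPhysApicBase field existed only so the first VCPU could win a compare-and-exchange race and perform the mapping once per VM (the ASMAtomicCmpXchgU64 call removed from HMVMXR0.cpp above); the new code keys off the per-VCPU u64MsrApicBase cache instead. A small self-contained sketch of the retired idiom, using C11 atomics in place of ASMAtomicCmpXchgU64 (all names here are illustrative, not VirtualBox API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Per-VM state; 0 means "APIC-access page not mapped yet". This mirrors the
   role of the deleted HMInternal.h field. */
static _Atomic uint64_t g_GCPhysApicBase;

/* Only the caller that swaps 0 -> GCPhysApicBase does the one-time mapping. */
static bool claimApicAccessMapping(uint64_t GCPhysApicBase)
{
    uint64_t u64Old = 0;
    return atomic_compare_exchange_strong(&g_GCPhysApicBase, &u64Old, GCPhysApicBase);
}

int main(void)
{
    const uint64_t GCPhysApicBase = UINT64_C(0xfee00000); /* typical page-masked APIC base */
    printf("VCPU0 maps: %d\n", claimApicAccessMapping(GCPhysApicBase)); /* 1: wins the race */
    printf("VCPU1 maps: %d\n", claimApicAccessMapping(GCPhysApicBase)); /* 0: already mapped */
    return 0;
}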