Changeset 79577 in vbox
- Timestamp: Jul 7, 2019 3:09:59 PM (6 years ago)
- svn:sync-xref-src-repo-rev: 131867
- File: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with '+'
- Removed: prefixed with '-'
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(diff r79564 → r79577)

     Assert(!pVmcsInfo->pvShadowVmcs);

-    /* The host-physical address of the virtual-APIC page in guest memory is taken directly. */
-    Assert(pVmcsInfo->HCPhysVirtApic == NIL_RTHCPHYS);
-    Assert(!pVmcsInfo->pbVirtApic);
+    /* Get the allocated virtual-APIC page from CPUM. */
+    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
+    {
+        pVmcsInfo->pbVirtApic = (uint8_t *)CPUMGetGuestVmxVirtApicPage(pVCpu, &pVCpu->cpum.GstCtx,
+                                                                       &pVmcsInfo->HCPhysVirtApic);
+        Assert(pVmcsInfo->pbVirtApic);
+        Assert(pVmcsInfo->HCPhysVirtApic && pVmcsInfo->HCPhysVirtApic != NIL_RTHCPHYS);
+    }
 }

…

      * Virtual-APIC page and TPR threshold.
      *
-     * We shall use the host-physical address of the virtual-APIC page in guest memory
-     * directly. For this reason, we can access the virtual-APIC page of the nested-guest only
-     * using PGM physical handlers as we must not assume a kernel virtual-address mapping
-     * exists and requesting PGM for a mapping could be expensive/resource intensive (PGM
-     * mapping cache).
-     */
-    RTHCPHYS HCPhysVirtApic = NIL_RTHCPHYS;
-    uint32_t const u32TprThreshold = pVmcsNstGst->u32TprThreshold;
+     * The virtual-APIC page has already been allocated (by CPUM during VM startup) and cached
+     * from guest memory as part of VMLAUNCH/VMRESUME instruction emulation. The host physical
+     * address has also been updated in the nested-guest VMCS.
+     */
+    PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
+    RTHCPHYS HCPhysVirtApic;
+    uint32_t u32TprThreshold;
     if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
     {
-        int rc = PGMPhysGCPhys2HCPhys(pVM, pVmcsNstGst->u64AddrVirtApic.u, &HCPhysVirtApic);
-
-        /*
-         * If the guest hypervisor has loaded crap into the virtual-APIC page field
-         * we would fail to obtain a valid host-physical address for its guest-physical
-         * address.
-         *
-         * We currently do not support this scenario. Maybe in the future if there is a
-         * pressing need we can explore making this particular set of conditions work.
-         * Right now we just cause a VM-entry failure.
-         *
-         * This has already been checked by VMLAUNCH/VMRESUME instruction emulation,
-         * so should not really failure at the moment.
-         */
-        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
+        HCPhysVirtApic  = pVmcsInfoNstGst->HCPhysVirtApic;
+        u32TprThreshold = pVmcsNstGst->u32TprThreshold;
     }
     else
     {
+        HCPhysVirtApic  = 0;
+        u32TprThreshold = 0;
+
         /*
          * We must make sure CR8 reads/write must cause VM-exits when TPR shadowing is not

…

      * Validate basic assumptions.
      */
-    PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
     Assert(pVM->hm.s.vmx.fAllowUnrestricted);
     Assert(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS);

…

         rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, cPleWindowTicks);
     }
-    if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
-    {
-        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
-        rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
-    }
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
+    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, pVmcsInfoNstGst->HCPhysVirtApic);
     AssertRCReturn(rc, rc);

…

     /*
      * Sync the TPR shadow with our APIC state.
+     *
+     * With nested-guests, mark the virtual-APIC page as dirty so it can be synced
+     * when performing the nested-guest VM-exit.
      */
-    if (   !pVmxTransient->fIsNestedGuest
-        && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
+    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
     {
-        Assert(pVmcsInfo->pbVirtApic);
-        if (pVmxTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR])
+        if (!pVmxTransient->fIsNestedGuest)
         {
-            rc = APICSetTpr(pVCpu, pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]);
-            AssertRC(rc);
-            ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
+            Assert(pVmcsInfo->pbVirtApic);
+            if (pVmxTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR])
+            {
+                rc = APICSetTpr(pVCpu, pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]);
+                AssertRC(rc);
+                ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
+            }
         }
+        else
+            pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtApicPageDirty = true;
     }

…

 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     else if (pVmxTransient->fIsNestedGuest)
-    {
-# if 0
-        /*
-         * Copy the VM-instruction error field to the guest VMCS.
-         */
-        /** @todo NSTVMX: Verify we're using the fast path. */
-        uint32_t u32RoVmInstrError;
-        rc = VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &u32RoVmInstrError);
-        AssertRCReturn(rc, rc);
-        PVMXVVMCS pGstVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
-        pGstVmcs->u32RoVmInstrError = u32RoVmInstrError;
-        /** @todo NSTVMX: Advance guest RIP and other fast path related restoration. */
-# else
         AssertMsgFailed(("VMLAUNCH/VMRESUME failed but shouldn't happen when VMLAUNCH/VMRESUME was emulated in IEM!\n"));
-# endif
-    }
 #endif
     else
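The last two hunks change how the TPR shadow result is propagated for nested guests: after running the nested guest, HM no longer pushes the TPR straight into the virtual APIC device; it only flags the CPUM-cached virtual-APIC page as dirty (fVirtApicPageDirty) and defers the write-back. The snippet below is a minimal, self-contained sketch of that cache-and-dirty-flag pattern, not VirtualBox code; apart from the xAPIC TPR offset (0x80), every name in it (VirtApicCache, PostRunMarkDirty, SyncOnNestedVmexit) is a hypothetical stand-in.

/* Hedged sketch of the cache-and-dirty-flag pattern the changeset switches to.
 * Not VirtualBox code; all identifiers are illustrative stand-ins. */
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SIZE      4096
#define XAPIC_OFF_TPR  0x80   /* Task-priority register offset within the APIC page. */

typedef struct VirtApicCache
{
    uint8_t abPage[PAGE_SIZE]; /* Host-side copy the CPU updates via the TPR shadow. */
    bool    fDirty;            /* Set after hardware VM-entry, consumed on nested VM-exit. */
} VirtApicCache;

/* After running the nested guest with TPR shadowing enabled, only mark the
 * cached page dirty instead of syncing the TPR immediately. */
static void PostRunMarkDirty(VirtApicCache *pCache)
{
    pCache->fDirty = true;
}

/* When the nested-guest VM-exit is performed, write the cached page back to the
 * guest-visible virtual-APIC page so the guest hypervisor sees the updated TPR. */
static void SyncOnNestedVmexit(VirtApicCache *pCache, uint8_t *pbGuestVirtApicPage)
{
    if (pCache->fDirty)
    {
        memcpy(pbGuestVirtApicPage, pCache->abPage, PAGE_SIZE);
        pCache->fDirty = false;
    }
}

int main(void)
{
    static VirtApicCache Cache;
    static uint8_t       abGuestPage[PAGE_SIZE];

    Cache.abPage[XAPIC_OFF_TPR] = 0x20;  /* Pretend the nested guest raised its TPR. */
    PostRunMarkDirty(&Cache);
    SyncOnNestedVmexit(&Cache, abGuestPage);

    printf("Guest-visible TPR after nested VM-exit: %#x\n", abGuestPage[XAPIC_OFF_TPR]);
    return 0;
}

Per the new comment in the diff, fVirtApicPageDirty is consumed when the nested-guest VM-exit is performed, which is why this hunk only sets the flag.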