Changeset 92453 in vbox for trunk/src/VBox/VMM/VMMR3
- Timestamp: Nov 16, 2021 10:44:37 AM
- File: 1 edited
--- trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp (revision 92376)
+++ trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp (revision 92453)
@@ -967,4 +967,6 @@
         APICSetTpr(pVCpu, u64Cr8);
     }
+    if (fWhat & CPUMCTX_EXTRN_XCRx)
+        READ_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
 
     /* Debug registers. */
@@ -1087,4 +1089,8 @@
     pVCpu->cpum.GstCtx.fExtrn = 0;
 
+#ifdef LOG_ENABLED
+    nemR3DarwinLogState(pVM, pVCpu);
+#endif
+
     /* Typical. */
     if (!fMaybeChangedMode && !fUpdateCr3)
@@ -1262,4 +1268,5 @@
 
 
+#if 0 /* unused */
 DECL_FORCE_INLINE(bool) nemR3DarwinIsVmxLbr(PCVMCC pVM)
 {
@@ -1267,4 +1274,5 @@
     return false;
 }
+#endif
 
 
@@ -1274,5 +1282,5 @@
 #define IN_NEM_DARWIN
 //#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
-#define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
+//#define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
 #define VCPU_2_VMXSTATE(a_pVCpu) (a_pVCpu)->nem.s
 #define VCPU_2_VMXSTATS(a_pVCpu) (*(a_pVCpu)->nem.s.pVmxStats)
@@ -1539,4 +1547,8 @@
     RT_NOREF(pVM);
 
+#ifdef LOG_ENABLED
+    nemR3DarwinLogState(pVM, pVCpu);
+#endif
+
     uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
     if (!fWhat)
@@ -1571,6 +1583,15 @@
     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
 
+    if (fWhat & CPUMCTX_EXTRN_XCRx)
+    {
+        WRITE_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_XCRx);
+    }
+
     if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+    {
         WRITE_GREG(HV_X86_TPR, CPUMGetGuestCR8(pVCpu));
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
+    }
 
     /* Debug registers. */
@@ -1581,10 +1602,17 @@
         WRITE_GREG(HV_X86_DR2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
         WRITE_GREG(HV_X86_DR3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR0_DR3);
     }
     if (fWhat & CPUMCTX_EXTRN_DR6)
+    {
         WRITE_GREG(HV_X86_DR6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR6);
+    }
     if (fWhat & CPUMCTX_EXTRN_DR7)
+    {
         WRITE_GREG(HV_X86_DR7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));
-
-    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR7);
+    }
+
+    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
     {
@@ -1594,11 +1622,19 @@
         else
             return nemR3DarwinHvSts2Rc(hrc);
+
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(HM_CHANGED_GUEST_X87 | HM_CHANGED_GUEST_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE));
     }
 
     /* MSRs */
     if (fWhat & CPUMCTX_EXTRN_EFER)
+    {
         WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_EFER_FULL, pVCpu->cpum.GstCtx.msrEFER);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
+    }
     if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
+    {
         WRITE_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_KERNEL_GS_BASE);
+    }
     if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
     {
@@ -1606,4 +1642,5 @@
         WRITE_MSR(MSR_IA32_SYSENTER_EIP, pVCpu->cpum.GstCtx.SysEnter.eip);
         WRITE_MSR(MSR_IA32_SYSENTER_ESP, pVCpu->cpum.GstCtx.SysEnter.esp);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
     }
     if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
@@ -1613,4 +1650,5 @@
         WRITE_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR);
         WRITE_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSCALL_MSRS);
     }
     if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
@@ -1619,4 +1657,6 @@
         if (RT_UNLIKELY(hrc != HV_SUCCESS))
             return nemR3DarwinHvSts2Rc(hrc);
+
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
 
 #if 0
@@ -1652,11 +1692,4 @@
     WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0 /*MSR_IA32_DEBUGCTL_LBR*/);
 
-#if 0 /** @todo */
-    WRITE_GREG(HV_X86_TSS_BASE, );
-    WRITE_GREG(HV_X86_TSS_LIMIT, );
-    WRITE_GREG(HV_X86_TSS_AR, );
-    WRITE_GREG(HV_X86_XCR0, );
-#endif
-
     hv_vcpu_invalidate_tlb(pVCpu->nem.s.hVCpuId);
     hv_vcpu_flush(pVCpu->nem.s.hVCpuId);
@@ -1665,14 +1698,8 @@
 
     /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
-    ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(  (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
-                                                   | HM_CHANGED_GUEST_CR2
-                                                   | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
-                                                   | HM_CHANGED_GUEST_X87
-                                                   | HM_CHANGED_GUEST_SSE_AVX
-                                                   | HM_CHANGED_GUEST_OTHER_XSAVE
-                                                   | HM_CHANGED_GUEST_XCRx
-                                                   | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
-                                                   | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */
-                                                   | HM_CHANGED_GUEST_TSC_AUX
-                                                   | HM_CHANGED_GUEST_OTHER_MSRS
+    ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(  HM_CHANGED_GUEST_TSC_AUX
+                                                   | HM_CHANGED_GUEST_HWVIRT
+                                                   | HM_CHANGED_VMX_GUEST_AUTO_MSRS
+                                                   | HM_CHANGED_VMX_GUEST_LAZY_MSRS
                                                    | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
@@ -1708,8 +1735,7 @@
                     VERR_NEM_IPE_0);
 
-    /** @todo Only copy the state on demand (requires changing to adhere to fCtxChanged from th VMX code
-     * flags instead of the fExtrn one living in CPUM.
-     */
-    rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, UINT64_MAX);
+    /** @todo Only copy the state on demand (the R0 VT-x code saves some stuff unconditionally and the VMX template assumes that
+     *        when handling exits). */
+    rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
     AssertRCReturn(rc, rc);
 
@@ -1723,5 +1749,5 @@
 
 /**
- * Worker for nemR3NativeInit that loads the Hypervisor.framwork shared library.
+ * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
  *
  * @returns VBox status code.
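An aside on the hunks above: every WRITE_GREG/WRITE_MSR in the export path is a thin wrapper around a Hypervisor.framework call that bails out of the function on failure. A minimal sketch of what such wrappers amount to, assuming the stock hv_vcpu_write_register/hv_vcpu_write_msr API (the actual macros in NEMR3Native-darwin.cpp differ in their exact error handling):

    /* Sketch only -- hypothetical reimplementation, not the VirtualBox source. */
    #include <Hypervisor/hv.h>

    #define WRITE_GREG(a_GReg, a_Value) \
        do { \
            hv_return_t hrcWr = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \
            if (RT_UNLIKELY(hrcWr != HV_SUCCESS)) \
                return nemR3DarwinHvSts2Rc(hrcWr); /* translate HV status to a VBox status code */ \
        } while (0)

    #define WRITE_MSR(a_Msr, a_Value) \
        do { \
            hv_return_t hrcWr = hv_vcpu_write_msr(pVCpu->nem.s.hVCpuId, (a_Msr), (a_Value)); \
            if (RT_UNLIKELY(hrcWr != HV_SUCCESS)) \
                return nemR3DarwinHvSts2Rc(hrcWr); \
        } while (0)

Because a failed write returns out of the function immediately, each if (fWhat & CPUMCTX_EXTRN_...) block can clear its HM_CHANGED_* bit right after the macro call, knowing the state actually reached the hypervisor.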
@@ -2389,5 +2415,5 @@
     if (pVM->nem.s.fCreatedAsid)
     {
-        hv_return_t hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, pVM->nem.s.uVmAsid);
+        hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, pVM->nem.s.uVmAsid);
         AssertReturn(hrc == HV_SUCCESS, VERR_NEM_VM_CREATE_FAILED);
     }
@@ -2476,5 +2502,5 @@
      */
     hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
-    Assert(hrc == HV_SUCCESS);
+    Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
 
     if (pVCpu->nem.s.pVmxStats)
@@ -2490,5 +2516,5 @@
     {
         hv_return_t hrc = hv_vm_space_destroy(pVM->nem.s.uVmAsid);
-        Assert(hrc == HV_SUCCESS);
+        Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
         pVM->nem.s.fCreatedAsid = false;
     }
@@ -2624,4 +2650,5 @@
     pVCpu->nem.s.Event.fPending = false;
 
+    Assert(!pVCpu->nem.s.fCtxChanged);
     hv_return_t hrc;
     if (hv_vcpu_run_until)
@@ -2645,4 +2672,5 @@
             break;
         }
+        //Assert(!pVCpu->cpum.GstCtx.fExtrn);
     }
     else
@@ -2771,4 +2799,6 @@
     LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
 
+    RT_NOREF(pVM, fFlags);
+
     hv_return_t hrc = hv_vcpu_interrupt(&pVCpu->nem.s.hVCpuId, 1);
     if (hrc != HV_SUCCESS)
@@ -2813,5 +2843,5 @@
                                           void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
 {
-    RT_NOREF(pVM, puNemRange);
+    RT_NOREF(pVM, puNemRange, pvRam, fFlags);
 
     Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
@@ -3069,5 +3099,5 @@
     Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
           GCPhys, HCPhys, fPageProt, enmType, *pu2State));
-    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
+    RT_NOREF(HCPhys, fPageProt, enmType);
 
     return nemHCJustUnmapPage(pVM, GCPhys, pu2State);
@@ -3106,3 +3136,4 @@
 VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
 {
+    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
     STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
@@ -3163,5 +3194,8 @@
     hv_return_t hrc = hv_vm_sync_tsc(uPausedTscValue);
     if (RT_LIKELY(hrc == HV_SUCCESS))
+    {
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_TSC_AUX);
         return VINF_SUCCESS;
+    }
 
     return nemR3DarwinHvSts2Rc(hrc);
@@ -3169,4 +3203,21 @@
 
 
+/**
+ * Returns features supported by the NEM backend.
+ *
+ * @returns Flags of features supported by the native NEM backend.
+ * @param   pVM The cross context VM structure.
+ */
+VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
+{
+    RT_NOREF(pVM);
+    /*
+     * Apple's Hypervisor.framework is not supported if the CPU doesn't support nested paging
+     * and unrestricted guest execution support so we can safely return these flags here always.
+     */
+    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
+}
+
+
 /** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
  *
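Taken together, the changeset tightens the fCtxChanged dirty-state bookkeeping: each state group that the export path pushes to Hypervisor.framework now clears its own HM_CHANGED_* bit, which is what makes the new Assert(!pVCpu->nem.s.fCtxChanged) ahead of the vCPU run legal. The pattern in miniature, as illustrative standalone C (the MY_CHANGED_* names and exportGroup helper are made up for this sketch, not VirtualBox APIs):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MY_CHANGED_GPRS  UINT64_C(0x1)
    #define MY_CHANGED_XCR0  UINT64_C(0x2)
    #define MY_CHANGED_MSRS  UINT64_C(0x4)

    /* Everything starts out dirty, i.e. not yet pushed to the hypervisor. */
    static uint64_t g_fCtxChanged = MY_CHANGED_GPRS | MY_CHANGED_XCR0 | MY_CHANGED_MSRS;

    /* Export one state group if (and only if) it is dirty, then mark it clean. */
    static void exportGroup(uint64_t fGroup, const char *pszName)
    {
        if (g_fCtxChanged & fGroup)
        {
            printf("exporting %s\n", pszName); /* a hv_vcpu_write_*() call would go here */
            g_fCtxChanged &= ~fGroup;          /* the per-group clearing r92453 adds */
        }
    }

    int main(void)
    {
        exportGroup(MY_CHANGED_GPRS, "GPRs");
        exportGroup(MY_CHANGED_XCR0, "XCR0");
        exportGroup(MY_CHANGED_MSRS, "MSRs");
        assert(!g_fCtxChanged); /* mirrors the new Assert(!pVCpu->nem.s.fCtxChanged) */
        return 0;
    }

One wrinkle visible in the diff itself: the mask cleared after hv_vcpu_write_fpstate() mixes HM_CHANGED_GUEST_X87 and HM_CHANGED_GUEST_SSE_AVX with CPUMCTX_EXTRN_OTHER_XSAVE. The HM_CHANGED_* and CPUMCTX_EXTRN_* constants live in different flag namespaces, so that line looks like it clears the wrong bit for the extended XSAVE state.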