Timestamp: Feb 16, 2018 4:24:43 PM
svn:sync-xref-src-repo-rev: 120903
Location: trunk/src/VBox/VMM
Files: 7 edited
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r71031 → r71040)

  * @param   pVM             The cross context VM structure.
  * @param   pVCpu           The cross context per virtual CPU structure.
- * @param   GCPhys          The guest physical address. We'll apply A20 masking
- *                          to this since most of the native hypervisor APIs
- *                          doesn't seem to implement A20 masking.
+ *                          Optional.
+ * @param   GCPhys          The guest physical address.
+ * @param   fMakeWritable   Whether to try make the page writable or not. If it
+ *                          cannot be made writable, NEM_PAGE_PROT_WRITE won't
+ *                          be returned and the return code will be unaffected
  * @param   pInfo           Where to return the page information. This is
  *                          initialized even on failure.
- * @param   pfnChecker      Page in-sync checker callback.
+ * @param   pfnChecker      Page in-sync checker callback. Optional.
  * @param   pvUser          User argument to pass to pfnChecker.
  */
-VMM_INT_DECL(int) PGMPhysNemQueryPageInfo(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo,
-                                          PFNPGMPHYSNEMQUERYCHECKER pfnChecker, void *pvUser)
+VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
+                                            PFNPGMPHYSNEMQUERYCHECKER pfnChecker, void *pvUser)
 {
     pgmLock(pVM);
-    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);

     PPGMPAGE pPage;
…
     if (RT_SUCCESS(rc))
     {
+        /* Try make it writable if requested. */
+        if (fMakeWritable)
+            switch (PGM_PAGE_GET_STATE(pPage))
+            {
+                case PGM_PAGE_STATE_SHARED:
+                case PGM_PAGE_STATE_WRITE_MONITORED:
+                case PGM_PAGE_STATE_ZERO:
+                    rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
+                    if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
+                        rc = VINF_SUCCESS;
+                    break;
+            }
+
         /* Fill in the info. */
         pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
…
         PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
         pInfo->enmType = enmType;
-        /** @todo Consider merging pgmPhysPageCalcNemProtection into the switch below. */
         pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
         switch (PGM_PAGE_GET_STATE(pPage))
…

     /* Call the checker and update NEM state. */
-    rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
-    PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
+    if (pfnChecker)
+    {
+        rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
+        PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
+    }

     /* Done. */
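The PGMAllPhys.cpp hunk turns the plain page-info query into one that can optionally make the page writable first and that treats the checker callback as optional. A standalone sketch of that calling pattern, with mocked-up types standing in for the real PGM structures (every name below is illustrative, not VirtualBox API):

/* Minimal sketch of the new query pattern: optionally promote the page to a
 * writable state before filling in the info, and only invoke the checker when
 * the caller supplied one.  Plain C stand-ins, not the VMM's types. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { PAGE_ZERO, PAGE_SHARED, PAGE_WRITE_MONITORED, PAGE_ALLOCATED } PAGESTATE;
typedef struct PAGEINFO { PAGESTATE enmState; bool fWritable; } PAGEINFO;
typedef int (*PFNCHECKER)(PAGEINFO *pInfo, void *pvUser);

static int queryPageInfo(PAGESTATE *penmPage, bool fMakeWritable, PAGEINFO *pInfo,
                         PFNCHECKER pfnChecker, void *pvUser)
{
    /* Try make it writable if requested (mirrors the new switch in r71040). */
    if (fMakeWritable)
        switch (*penmPage)
        {
            case PAGE_SHARED:
            case PAGE_WRITE_MONITORED:
            case PAGE_ZERO:
                *penmPage = PAGE_ALLOCATED;   /* stand-in for pgmPhysPageMakeWritable */
                break;
            default:
                break;
        }

    /* Fill in the info. */
    pInfo->enmState  = *penmPage;
    pInfo->fWritable = *penmPage == PAGE_ALLOCATED;

    /* The checker is now optional. */
    if (pfnChecker)
        return pfnChecker(pInfo, pvUser);
    return 0;
}

int main(void)
{
    PAGESTATE enmPage = PAGE_ZERO;
    PAGEINFO  Info;
    queryPageInfo(&enmPage, true /*fMakeWritable*/, &Info, NULL /*pfnChecker*/, NULL);
    printf("state=%d writable=%d\n", Info.enmState, Info.fWritable);
    return 0;
}

A caller that only wants the information passes NULL for the checker, as the nested query in nemR3WinHandleMemoryAccessPageCheckerCallback further down in this changeset does.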
trunk/src/VBox/VMM/VMMR3/NEMR3.cpp (r70980 → r71040)

  * Used to bring up secondary CPUs on SMP as well as CPU hot plugging.
  *
- * @param   pVCpu   The cross context virtual CPU structure to reset.
- */
-VMMR3_INT_DECL(void) NEMR3ResetCpu(PVMCPU pVCpu)
+ * @param   pVCpu       The cross context virtual CPU structure to reset.
+ * @param   fInitIpi    Set if being reset due to INIT IPI.
+ */
+VMMR3_INT_DECL(void) NEMR3ResetCpu(PVMCPU pVCpu, bool fInitIpi)
 {
 #ifdef VBOX_WITH_NATIVE_NEM
     if (pVCpu->pVMR3->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
-        nemR3NativeResetCpu(pVCpu);
-#else
-    RT_NOREF(pVCpu);
+        nemR3NativeResetCpu(pVCpu, fInitIpi);
+#else
+    RT_NOREF(pVCpu, fInitIpi);
 #endif
 }
…
 #endif
 }
+
+
+VMMR3_INT_DECL(void) NEMR3NotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
+{
+    AssertLogRelReturnVoid(VM_IS_NEM_ENABLED(pVM));
+#ifdef VBOX_WITH_NATIVE_NEM
+    nemR3NativeNotifyFF(pVM, pVCpu, fFlags);
+#else
+    RT_NOREF(pVM, pVCpu, fFlags);
+#endif
+}
+


trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp (r71035 → r71040)

  * Log group 2: Exit logging.
  * Log group 3: Log context on exit.
+ * Log group 12: API intercepts.
  */

…
 #define NEM_WIN_PAGE_STATE_WRITABLE     3
 /** @} */
+
+/** Checks if a_GCPhys is subject to the limited A20 gate emulation. */
+#define NEM_WIN_IS_SUBJECT_TO_A20(a_GCPhys)     ((RTGCPHYS)((a_GCPhys) - _1M) < (RTGCPHYS)_64K)
+
+/** Checks if a_GCPhys is relevant to the limited A20 gate emulation. */
+#define NEM_WIN_IS_RELEVANT_TO_A20(a_GCPhys)    \
+    ( ((RTGCPHYS)((a_GCPhys) - _1M) < (RTGCPHYS)_64K) || ((RTGCPHYS)(a_GCPhys) < (RTGCPHYS)_64K) )


…
 #endif

+/** NEM_WIN_PAGE_STATE_XXX names. */
+static const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
+/** WHV_MEMORY_ACCESS_TYPE names */
+static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
+

 /*********************************************************************************************************************************
 *   Internal Functions                                                                                                           *
 *********************************************************************************************************************************/
-static int nemR3NativeSetPhysPage(PVM pVM, RTGCPHYS GCPhys, uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
+static int nemR3NativeSetPhysPage(PVM pVM, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
+                                  uint8_t *pu2State, bool fBackingChanged);


…
                                 pvInput, cbInput, pvOutput, cbOutput);
     if (!hEvt && !pfnApcCallback && !pvApcCtx)
-        Log6(("VID!NtDeviceIoControlFile: hFile=%#zx pIos=%p->{s:%#x, i:%#zx} uFunction=%#x Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
-              hFile, pIos, pIos->Status, pIos->Information, uFunction, pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
+        Log12(("VID!NtDeviceIoControlFile: hFile=%#zx pIos=%p->{s:%#x, i:%#zx} uFunction=%#x Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
+               hFile, pIos, pIos->Status, pIos->Information, uFunction, pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
     else
-        Log6(("VID!NtDeviceIoControlFile: hFile=%#zx hEvt=%#zx Apc=%p/%p pIos=%p->{s:%#x, i:%#zx} uFunction=%#x Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
-              hFile, hEvt, pfnApcCallback, pvApcCtx, pIos, pIos->Status, pIos->Information, uFunction,
-              pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
+        Log12(("VID!NtDeviceIoControlFile: hFile=%#zx hEvt=%#zx Apc=%p/%p pIos=%p->{s:%#x, i:%#zx} uFunction=%#x Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
+               hFile, hEvt, pfnApcCallback, pvApcCtx, pIos, pIos->Status, pIos->Information, uFunction,
+               pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
     return rcNt;
 }
…


+/**
+ * VM reset notification.
+ *
+ * @param   pVM     The cross context VM structure.
+ */
 void nemR3NativeReset(PVM pVM)
 {
-    NOREF(pVM);
-}
-
-
-void nemR3NativeResetCpu(PVMCPU pVCpu)
-{
-    NOREF(pVCpu);
+    /* Unfix the A20 gate. */
+    pVM->nem.s.fA20Fixed = false;
+}
+
+
+/**
+ * Reset CPU due to INIT IPI or hot (un)plugging.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the CPU being
+ *                      reset.
+ * @param   fInitIpi    Whether this is the INIT IPI or hot (un)plugging case.
+ */
+void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
+{
+    /* Lock the A20 gate if INIT IPI, make sure it's enabled. */
+    if (fInitIpi && pVCpu->idCpu > 0)
+    {
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
+        if (!pVM->nem.s.fA20Enabled)
+            nemR3NativeNotifySetA20(pVCpu, true);
+        pVM->nem.s.fA20Enabled = true;
+        pVM->nem.s.fA20Fixed   = true;
+    }
 }

…
     Assert(iReg < RT_ELEMENTS(aenmNames));
 #ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
-    Log6(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
-          pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
+    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
+           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
 #endif
     HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
…
     Assert(RT_ELEMENTS(aenmNames) >= cRegs);
 #ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
-    Log6(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
+    Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
 #endif
…
     /* Control registers. */
     Assert(aenmNames[28] == WHvX64RegisterCr0);
+    bool fMaybeChangedMode = false;
+    bool fFlushTlb         = false;
+    bool fFlushGlobalTlb   = false;
     if (pCtx->cr0 != aValues[28].Reg64)
     {
         CPUMSetGuestCR0(pVCpu, aValues[28].Reg64);
-        /** @todo more to do here! */
+        fMaybeChangedMode = true;
+        fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
     }
     pCtx->cr2 = aValues[29].Reg64;
…
     {
         CPUMSetGuestCR3(pVCpu, aValues[30].Reg64);
-        /** @todo more to do here! */
+        fFlushTlb = true;
     }
     if (pCtx->cr4 != aValues[31].Reg64)
     {
         CPUMSetGuestCR4(pVCpu, aValues[31].Reg64);
-        /** @todo more to do here! */
+        fMaybeChangedMode = true;
+        fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
     }
     APICSetTpr(pVCpu, (uint8_t)aValues[32].Reg64 << 4);
…
     {
         pCtx->msrEFER = aValues[65].Reg64;
-        /** @todo more to do here! */
+        fMaybeChangedMode = true;
     }

…


+    if (fMaybeChangedMode)
+    {
+        int rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
+        AssertRC(rc);
+    }
+    if (fFlushTlb)
+    {
+        int rc = PGMFlushTLB(pVCpu, pCtx->cr3, fFlushGlobalTlb);
+        AssertRC(rc);
+    }
+
     return VINF_SUCCESS;
 }
…
     Log2(("Exit: Memory access: GCPhys=%RGp GCVirt=%RGv %s %s %s\n",
           pExitReason->MemoryAccess.Gpa, pExitReason->MemoryAccess.Gva,
-          pExitReason->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessRead ? "read"
-          : pExitReason->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite ? "write"
-          : pExitReason->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessExecute ? "exec" : "!invalid access!",
+          g_apszWHvMemAccesstypes[pExitReason->MemoryAccess.AccessInfo.AccessType],
           pExitReason->MemoryAccess.AccessInfo.GpaUnmapped ? "unmapped" : "mapped",
           pExitReason->MemoryAccess.AccessInfo.GvaValid ? "" : "invalid-gc-virt"));
…

 /**
- * @callback_method_impl{FNPGMPHYSNEMQUERYCHECKER}
+ * @callback_method_impl{FNPGMPHYSNEMQUERYCHECKER,
+ *      Worker for nemR3WinHandleMemoryAccess.
+ *      On input the boolean at pvUser indicates the desire to make the page
+ *      writable, whereas upon return it indicates whether we did anything to
+ *      the page mapping/protection. }
  */
 static DECLCALLBACK(int) nemR3WinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
                                                                        PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
 {
-    uint8_t u2State = pInfo->u2NemState;
+    /* If A20 is disabled, we may need to make another query on the masked
+       page to get the correct protection information. */
+    uint8_t  u2State = pInfo->u2NemState;
+    RTGCPHYS GCPhysSrc;
+    if (   pVM->nem.s.fA20Enabled
+        || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
+        GCPhysSrc = GCPhys;
+    else
+    {
+        GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
+        bool const fMakeWritable = *(bool *)pvUser;
+        PGMPHYSNEMPAGEINFO Info2;
+        int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, fMakeWritable, &Info2, NULL, NULL);
+        AssertRCReturn(rc, rc);
+
+        *pInfo = Info2;
+        pInfo->u2NemState = u2State;
+    }
+
+    bool *pfDidSomething = (bool *)pvUser;
     bool fBackingChanged = true;
     switch (u2State)
…
         case NEM_WIN_PAGE_STATE_UNMAPPED:
         case NEM_WIN_PAGE_STATE_NOT_SET:
+        {
             if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
+            {
+                Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
+                *pfDidSomething = false;
                 return VINF_SUCCESS;
+            }
+
+#if 0
+            /* Remap it. */
+            *pfDidSomething = true;
+            int rc = nemR3NativeSetPhysPage(pVM, GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
+                                            GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pInfo->fNemProt, &u2State, fBackingChanged);
+            pInfo->u2NemState = u2State;
+            Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
+                  GCPhys, g_apszPageStates[u2State], rc));
+            RT_NOREF(pVCpu);
+            return rc;
+#else
             break;
+#endif
+        }
         case NEM_WIN_PAGE_STATE_READABLE:
             if (pInfo->fNemProt != NEM_PAGE_PROT_NONE)
             {
                 if (!(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
+                {
+                    Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
+                    *pfDidSomething = false;
                     return VINF_SUCCESS;
+                }
                 fBackingChanged = false;
             }
…
         case NEM_WIN_PAGE_STATE_WRITABLE:
             if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
+            {
+                Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - #3\n", GCPhys));
+                *pfDidSomething = false;
                 return VINF_SUCCESS;
+            }
             if (pInfo->fNemProt != NEM_PAGE_PROT_NONE)
                 fBackingChanged = false;
…
     }

-    int rc = nemR3NativeSetPhysPage(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pInfo->fNemProt, &u2State, fBackingChanged);
+#if 0
+    /* Unmap it first, then take another fault, remap it. */
+    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
+    if (SUCCEEDED(hrc))
+    {
+        *pfDidSomething = true;
+        pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
+        Log5(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp => unmapped\n", GCPhys));
+        return VINF_SUCCESS;
+    }
+    LogRel(("nemR3WinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
+            GCPhys, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
+    return VERR_NEM_INIT_FAILED;
+
+#else
+    *pfDidSomething = true;
+    int rc = nemR3NativeSetPhysPage(pVM, GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
+                                    GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pInfo->fNemProt, &u2State, fBackingChanged);
     pInfo->u2NemState = u2State;
-    RT_NOREF(pVCpu, pvUser);
+    Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
+          GCPhys, g_apszPageStates[u2State], rc));
+    RT_NOREF(pVCpu);
     return rc;
+#endif
 }
…
      * out of sync first.
      */
+    bool fMakeWritableThenDidSomething = pMemCtx->AccessInfo.AccessType == WHvMemoryAccessWrite;
     PGMPHYSNEMPAGEINFO Info;
-    int rc = PGMPhysNemQueryPageInfo(pVM, pVCpu, pMemCtx->Gpa, &Info, nemR3WinHandleMemoryAccessPageCheckerCallback, NULL);
+    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMemCtx->Gpa, fMakeWritableThenDidSomething, &Info,
+                                       nemR3WinHandleMemoryAccessPageCheckerCallback, &fMakeWritableThenDidSomething);
     if (RT_SUCCESS(rc))
     {
-        if (Info.fNemProt & (pMemCtx->AccessInfo.AccessType == WHvMemoryAccessWrite ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
-            return VINF_SUCCESS;
-    }
+        if (Info.fNemProt & (pMemCtx->AccessInfo.AccessType == WHvMemoryAccessWrite ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
+        {
+            ////if (fMakeWritableThenDidSomething)
+            //{
+            //    Log4(("MemExit: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
+            //          pMemCtx->Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
+            //          Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
+            //          fMakeWritableThenDidSomething ? "" : " no-change", g_apszWHvMemAccesstypes[pMemCtx->AccessInfo.AccessType]));
+            //    return VINF_SUCCESS;
+            //}
+        }
+        Log4(("MemExit: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
+              pMemCtx->Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
+              Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
+              fMakeWritableThenDidSomething ? "" : " no-change", g_apszWHvMemAccesstypes[pMemCtx->AccessInfo.AccessType]));
+    }
+    else
+        Log4(("MemExit: %RGp rc=%Rrc%s; emulating (%s)\n", pMemCtx->Gpa, rc,
+              fMakeWritableThenDidSomething ? " modified-backing" : "", g_apszWHvMemAccesstypes[pMemCtx->AccessInfo.AccessType]));

     /*
…
            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
     {
+        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED);
         HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
+        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM);
         AssertLogRelMsgBreakStmt(SUCCEEDED(hrc),
                                  ("WHvRunVirtualProcessor(%p, %u,,) -> %Rhrc (Last=%#x/%u)\n", pVM->nem.s.hPartition, pVCpu->idCpu,
…


+/**
+ * Forced flag notification call from VMEmt.h.
+ *
+ * This is only called when pVCpu is in the VMCPUSTATE_STARTED_EXEC_NEM state.
+ *
+ * @param   pVM     The cross context VM structure.
+ * @param   pVCpu   The cross context virtual CPU structure of the CPU
+ *                  to be notified.
+ * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_XXX.
+ */
+void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
+{
+    HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
+    AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
+
+    RT_NOREF_PV(hrc);
+    RT_NOREF_PV(fFlags);
+}
+
+
 DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
 {
…


+/**
+ * @callback_method_impl{FNPGMPHYSNEMQUERYCHECKER}
+ */
+static DECLCALLBACK(int) nemR3WinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
+                                                            PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
+{
+    Assert(pVCpu == NULL);
+
+    /* We'll just unmap the memory. */
+    if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
+    {
+        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
+        if (SUCCEEDED(hrc))
+            pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
+        else
+        {
+            LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
+                    GCPhys, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
+            return VERR_INTERNAL_ERROR_2;
+        }
+    }
+    RT_NOREF(pVCpu, pvUser);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
+ *
+ * @returns The PGMPhysNemQueryPageInfo result.
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context virtual CPU structure.  Optional.
+ * @param   GCPhys      The page to unmap.
+ */
+static int nemR3WinUnmapPageForA20Gate(PVM pVM, RTGCPHYS GCPhys)
+{
+    PGMPHYSNEMPAGEINFO Info;
+    return PGMPhysNemPageInfoChecker(pVM, NULL /*pVCpu*/, GCPhys, false /*fMakeWritable*/, &Info,
+                                     nemR3WinUnsetForA20CheckerCallback, NULL);
+}
+
+
+/**
+ * Called when the A20 state changes.
+ *
+ * Hyper-V doesn't seem to offer a simple way of implementing the A20 line
+ * features of PCs.  So, we do a very minimal emulation of the HMA to make DOS
+ * happy.
+ *
+ * @param   pVCpu           The CPU the A20 state changed on.
+ * @param   fEnabled        Whether it was enabled (true) or disabled.
+ */
 void nemR3NativeNotifySetA20(PVMCPU pVCpu, bool fEnabled)
 {
-    LogRel(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
-    NOREF(pVCpu); NOREF(fEnabled);
+    Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (!pVM->nem.s.fA20Fixed)
+    {
+        pVM->nem.s.fA20Enabled = fEnabled;
+        for (RTGCPHYS GCPhys = _1M; GCPhys < _1M + _64K; GCPhys += X86_PAGE_SIZE)
+            nemR3WinUnmapPageForA20Gate(pVM, GCPhys);
+    }
 }

…


-static int nemR3NativeSetPhysPage(PVM pVM, RTGCPHYS GCPhys, uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
+/**
+ * Worker that maps pages into Hyper-V.
+ *
+ * This is used by the PGM physical page notifications as well as the memory
+ * access VMEXIT handlers.
+ *
+ * @returns VBox status code.
+ * @param   pVM             The cross context VM structure.
+ * @param   GCPhysSrc       The source page address.
+ * @param   GCPhysDst       The hyper-V destination page.  This may differ from
+ *                          GCPhysSrc when A20 is disabled.
+ * @param   fPageProt       NEM_PAGE_PROT_XXX.
+ * @param   pu2State        Our page state (input/output).
+ * @param   fBackingChanged Set if the page backing is being changed.
+ */
+static int nemR3NativeSetPhysPage(PVM pVM, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
+                                  uint8_t *pu2State, bool fBackingChanged)
 {
     /*
…
     uint8_t const u2OldState = *pu2State;
     uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
-                             : fPageProt & NEM_PAGE_PROT_READ  ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_NOT_SET;
+                             : fPageProt & NEM_PAGE_PROT_READ  ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
     if (   fBackingChanged
         || u2NewState != u2OldState)
     {
-        if (u2OldState > NEM_WIN_PAGE_STATE_NOT_SET)
-        {
-            HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
+        if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
+        {
+            HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
             if (SUCCEEDED(hrc))
             {
-                *pu2State = NEM_WIN_PAGE_STATE_NOT_SET;
-                if (u2NewState == NEM_WIN_PAGE_STATE_NOT_SET)
+                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
+                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
+                {
+                    Log5(("nemR3NativeSetPhysPage: %RGp => unmapped\n", GCPhysDst));
                     return VINF_SUCCESS;
+                }
             }
             else
             {
-                LogRel(("nemR3NativeSetPhysPage/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
-                        GCPhys, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
+                LogRel(("nemR3NativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
+                        GCPhysDst, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
                 return VERR_NEM_INIT_FAILED;
             }
…
     {
         void *pvPage;
-        int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhys, &pvPage);
+        int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
         if (RT_SUCCESS(rc))
         {
-            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhys, X86_PAGE_SIZE,
+            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
                                          WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
             if (SUCCEEDED(hrc))
             {
+                Log5(("nemR3NativeSetPhysPage: %RGp => writable\n", GCPhysDst));
                 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                 return VINF_SUCCESS;
             }
-            LogRel(("nemR3NativeSetPhysPage/writable: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
-                    GCPhys, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
+            LogRel(("nemR3NativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
+                    GCPhysDst, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
             return VERR_NEM_INIT_FAILED;
         }
-        LogRel(("nemR3NativeSetPhysPage/writable: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
+        LogRel(("nemR3NativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
         return rc;
     }
…
     {
         const void *pvPage;
-        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
+        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
         if (RT_SUCCESS(rc))
         {
-            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
+            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
                                          WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
             if (SUCCEEDED(hrc))
             {
                 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
+                Log5(("nemR3NativeSetPhysPage: %RGp => read+exec\n", GCPhysDst));
                 return VINF_SUCCESS;
             }
-            LogRel(("nemR3NativeSetPhysPage/readonly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
-                    GCPhys, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
+            LogRel(("nemR3NativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
+                    GCPhysDst, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
             return VERR_NEM_INIT_FAILED;
         }
-        LogRel(("nemR3NativeSetPhysPage/readonly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
+        LogRel(("nemR3NativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
         return rc;
     }
…
     RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

-    return nemR3NativeSetPhysPage(pVM, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
+    int rc;
+    if (   pVM->nem.s.fA20Enabled
+        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
+        rc = nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
+    else
+    {
+        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
+        rc = nemR3WinUnmapPageForA20Gate(pVM, GCPhys | RT_BIT_32(20));
+        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
+            rc = nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
+
+    }
+    return rc;
 }

…
     RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

-    nemR3NativeSetPhysPage(pVM, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
+    if (   pVM->nem.s.fA20Enabled
+        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
+        nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
+    else
+    {
+        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
+        nemR3WinUnmapPageForA20Gate(pVM, GCPhys | RT_BIT_32(20));
+        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
+            nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
+    }
 }

…
     RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);

-    nemR3NativeSetPhysPage(pVM, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
-}
-
+    if (   pVM->nem.s.fA20Enabled
+        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
+        nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
+    else
+    {
+        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
+        nemR3WinUnmapPageForA20Gate(pVM, GCPhys | RT_BIT_32(20));
+        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
+            nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
+    }
+}
+
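The two A20 macros added near the top of NEMR3Native-win.cpp use an unsigned-subtraction trick so a single compare covers the 1MB..1MB+64KB HMA window. A self-contained illustration, assuming plain uint64_t in place of RTGCPHYS and my own macro names:

/* Sketch of the A20 range checks; constants and macro names are local stand-ins. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MY_1M   UINT64_C(0x100000)
#define MY_64K  UINT64_C(0x10000)

/* Page lives in the HMA itself (1MB..1MB+64KB): its mapping must follow A20. */
#define IS_SUBJECT_TO_A20(GCPhys)   ((uint64_t)((GCPhys) - MY_1M) < MY_64K)

/* Page is either in the HMA or in the low 64KB that the HMA aliases when A20 is off. */
#define IS_RELEVANT_TO_A20(GCPhys)  (IS_SUBJECT_TO_A20(GCPhys) || (uint64_t)(GCPhys) < MY_64K)

int main(void)
{
    static const uint64_t s_aAddrs[] = { 0x00000, 0x0ffff, 0x10000, 0xfffff, 0x100000, 0x10ffef, 0x110000 };
    for (size_t i = 0; i < sizeof(s_aAddrs) / sizeof(s_aAddrs[0]); i++)
        printf("%#9" PRIx64 "  subject=%d  relevant=%d\n",
               s_aAddrs[i], (int)IS_SUBJECT_TO_A20(s_aAddrs[i]), (int)IS_RELEVANT_TO_A20(s_aAddrs[i]));
    return 0;
}

Pages that are "subject to" the check sit in the HMA and must follow the A20 setting; "relevant" pages also include the low 64KB they alias, which is why the notification handlers above unmap the GCPhys | RT_BIT_32(20) alias while A20 is disabled and resync it lazily.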
trunk/src/VBox/VMM/VMMR3/VM.cpp (r70953 → r71040)

         EMR3ResetCpu(pVCpu);
         HMR3ResetCpu(pVCpu);
-        NEMR3ResetCpu(pVCpu);
+        NEMR3ResetCpu(pVCpu, false /*fInitIpi*/);
         return VINF_EM_WAIT_SIPI;
     }
trunk/src/VBox/VMM/VMMR3/VMEmt.cpp (r69111 → r71040)

 #include <VBox/vmm/dbgf.h>
 #include <VBox/vmm/em.h>
+#include <VBox/vmm/nem.h>
 #include <VBox/vmm/pdmapi.h>
 #ifdef VBOX_WITH_REM
…
         AssertRC(rc);
     }
-    else if (   (   (fFlags & VMNOTIFYFF_FLAGS_POKE)
-                 || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
-             && pUVCpu->pVCpu)
-    {
-        VMCPUSTATE enmState = VMCPU_GET_STATE(pUVCpu->pVCpu);
-        if (enmState == VMCPUSTATE_STARTED_EXEC)
-        {
-            if (fFlags & VMNOTIFYFF_FLAGS_POKE)
-            {
-                int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POKE, 0, NULL);
-                AssertRC(rc);
-            }
-        }
+    else if (   (fFlags & VMNOTIFYFF_FLAGS_POKE)
+             || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
+    {
+        PVMCPU pVCpu = pUVCpu->pVCpu;
+        if (pVCpu)
+        {
+            VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
+            if (enmState == VMCPUSTATE_STARTED_EXEC)
+            {
+                if (fFlags & VMNOTIFYFF_FLAGS_POKE)
+                {
+                    int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POKE, 0, NULL);
+                    AssertRC(rc);
+                }
+            }
+            else if (enmState == VMCPUSTATE_STARTED_EXEC_NEM)
+                NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
 #ifdef VBOX_WITH_REM
-        else if (enmState == VMCPUSTATE_STARTED_EXEC_REM)
-        {
-            if (!(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
-                REMR3NotifyFF(pUVCpu->pVM);
-        }
+            else if (enmState == VMCPUSTATE_STARTED_EXEC_REM)
+            {
+                if (!(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
+                    REMR3NotifyFF(pUVCpu->pVM);
+            }
 #endif
+        }
     }
 }
…
         AssertRC(rc);
     }
+    else
+    {
+        PVMCPU pVCpu = pUVCpu->pVCpu;
+        if (pVCpu)
+        {
+            VMCPUSTATE enmState = pVCpu->enmState;
+            if (enmState == VMCPUSTATE_STARTED_EXEC_NEM)
+                NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
 #ifdef VBOX_WITH_REM
-    else if (   !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM)
-             && pUVCpu->pVCpu
-             && pUVCpu->pVCpu->enmState == VMCPUSTATE_STARTED_EXEC_REM)
-        REMR3NotifyFF(pUVCpu->pVM);
-#else
-    RT_NOREF(fFlags);
+            else if (   !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM)
+                     && enmState == VMCPUSTATE_STARTED_EXEC_REM)
+                REMR3NotifyFF(pUVCpu->pVM);
 #endif
+        }
+    }
 }

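The new VMCPUSTATE_STARTED_EXEC_NEM branches above only make sense together with the VMCPU_CMPXCHG_STATE calls wrapped around WHvRunVirtualProcessor earlier in this changeset: the notifier cancels the run only when it can see the EMT is inside it. A minimal sketch of that handshake, assuming POSIX threads and C11 atomics rather than the VMM's own primitives (all names are stand-ins):

/* The EMT publishes its "executing under NEM" state around a blocking run
 * call; the notifier checks the published state before issuing a cancel. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

enum { STARTED, STARTED_EXEC_NEM };

static atomic_int  g_enmState   = STARTED;
static atomic_bool g_fCancelRun = false;    /* stands in for WHvCancelRunVirtualProcessor */

static void fakeRunVirtualProcessor(void)
{
    /* Pretend to run guest code until "cancelled". */
    while (!atomic_load(&g_fCancelRun))
        usleep(1000);
}

static void *emtThread(void *pvUser)
{
    (void)pvUser;
    atomic_store(&g_enmState, STARTED_EXEC_NEM);   /* like VMCPU_CMPXCHG_STATE(..., EXEC_NEM, STARTED) */
    fakeRunVirtualProcessor();
    atomic_store(&g_enmState, STARTED);            /* like VMCPU_CMPXCHG_STATE(..., STARTED, EXEC_NEM) */
    puts("EMT: back from the run loop to process forced flags");
    return NULL;
}

int main(void)
{
    pthread_t hThread;
    pthread_create(&hThread, NULL, emtThread, NULL);
    usleep(10000);                                 /* let the EMT enter the run loop */

    /* Notification path: only poke the run loop if the CPU is actually inside it. */
    if (atomic_load(&g_enmState) == STARTED_EXEC_NEM)
    {
        puts("notifier: CPU is in NEM exec state, cancelling the run");
        atomic_store(&g_fCancelRun, true);
    }
    pthread_join(hThread, NULL);
    return 0;
}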
trunk/src/VBox/VMM/VMMR3/VMM.cpp (r70948 → r71040)

     EMR3ResetCpu(pVCpu);
     HMR3ResetCpu(pVCpu);
-    NEMR3ResetCpu(pVCpu);
+    NEMR3ResetCpu(pVCpu, true /*fInitIpi*/);

     /* This will trickle up on the target EMT. */
trunk/src/VBox/VMM/include/NEMInternal.h (r70979 → r71040)

 #ifdef RT_OS_WINDOWS
     /** Set if we've created the EMTs. */
-    bool                        fCreatedEmts;
+    bool                        fCreatedEmts : 1;
     /** WHvRunVpExitReasonX64Cpuid is supported. */
-    bool                        fExtendedMsrExit;
+    bool                        fExtendedMsrExit : 1;
     /** WHvRunVpExitReasonX64MsrAccess is supported. */
-    bool                        fExtendedCpuIdExit;
+    bool                        fExtendedCpuIdExit : 1;
     /** WHvRunVpExitReasonException is supported. */
-    bool                        fExtendedXcptExit;
+    bool                        fExtendedXcptExit : 1;
+    /** Set if we've started more than one CPU and cannot mess with A20. */
+    bool                        fA20Fixed : 1;
+    /** Set if A20 is enabled. */
+    bool                        fA20Enabled : 1;
     /** The reported CPU vendor. */
     CPUMCPUVENDOR               enmCpuVendor;
…
 int     nemR3NativeTerm(PVM pVM);
 void    nemR3NativeReset(PVM pVM);
-void    nemR3NativeResetCpu(PVMCPU pVCpu);
+void    nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi);
 VBOXSTRICTRC    nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu);
 bool    nemR3NativeCanExecuteGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
 bool    nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable);
+void    nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags);

 int     nemR3NativeNotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb);
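Turning the NEM feature booleans into 1-bit bitfields lets the two new A20 flags slot into the space the existing flags already occupy. A quick way to see the effect (layout is compiler-specific, so treat the numbers as typical rather than guaranteed; the struct below is a mock-up, not the real NEM state):

/* Compare the footprint of six plain bools versus six 1-bit bool bitfields. */
#include <stdbool.h>
#include <stdio.h>

struct FlagsPlain
{
    bool fCreatedEmts, fExtendedMsrExit, fExtendedCpuIdExit, fExtendedXcptExit, fA20Fixed, fA20Enabled;
};

struct FlagsPacked
{
    bool fCreatedEmts       : 1;
    bool fExtendedMsrExit   : 1;
    bool fExtendedCpuIdExit : 1;
    bool fExtendedXcptExit  : 1;
    bool fA20Fixed          : 1;
    bool fA20Enabled        : 1;
};

int main(void)
{
    printf("plain:  %zu bytes\n", sizeof(struct FlagsPlain));   /* typically 6 */
    printf("packed: %zu bytes\n", sizeof(struct FlagsPacked));  /* typically 1 */
    return 0;
}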