Changeset 72207 in vbox

- Timestamp: May 14, 2018 7:16:43 PM (7 years ago)
- svn:sync-xref-src-repo-rev: 122646
- Location: trunk
- Files: 7 edited
Legend: unchanged lines are shown with a leading space, added lines with a leading "+", removed lines with a leading "-"; a line with "…" marks context omitted between hunks.
trunk/include/VBox/err.h (r71349 → r72207)

  /** Get register caller must change the CPU mode. */
  #define VINF_NEM_CHANGE_PGM_MODE (6810)
+
+ /** NEM internal processing error \#0. */
+ #define VINF_NEM_IPE_0 (6890)
+ /** NEM internal processing error \#1. */
+ #define VINF_NEM_IPE_1 (6891)
+ /** NEM internal processing error \#2. */
+ #define VINF_NEM_IPE_2 (6892)
+ /** NEM internal processing error \#3. */
+ #define VINF_NEM_IPE_3 (6893)
+ /** NEM internal processing error \#4. */
+ #define VINF_NEM_IPE_4 (6894)
+ /** NEM internal processing error \#5. */
+ #define VINF_NEM_IPE_5 (6895)
+ /** NEM internal processing error \#6. */
+ #define VINF_NEM_IPE_6 (6896)
+ /** NEM internal processing error \#7. */
+ #define VINF_NEM_IPE_7 (6897)
+ /** NEM internal processing error \#8. */
+ #define VINF_NEM_IPE_8 (6898)
+ /** NEM internal processing error \#9. */
+ #define VINF_NEM_IPE_9 (6899)
  /** @} */
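The ten new VINF_NEM_IPE_* codes give the NEM code distinct informational statuses (6890-6899) to hand back from assertion guards; the template file further down uses VINF_NEM_IPE_0 exactly that way when it refuses to handle SMIs. Below is a minimal, self-contained sketch of that pattern, assuming nothing beyond standard C: MY_ASSERT_RETURN and handleInterruptForceFlags are illustrative stand-ins, not VirtualBox APIs.

    #include <stdio.h>

    #define VINF_SUCCESS   (0)
    #define VINF_NEM_IPE_0 (6890)   /* value taken from the err.h hunk above */

    /* Stand-in for VBox's AssertReturn: bail out with the given status when
       the condition does not hold (a strict build would also assert). */
    #define MY_ASSERT_RETURN(a_Expr, a_rc) do { if (!(a_Expr)) return (a_rc); } while (0)

    static int handleInterruptForceFlags(int fSmiPending)
    {
        /* SMIs are not implemented, so return an internal-processing-error
           status instead of continuing with state we cannot handle. */
        MY_ASSERT_RETURN(!fSmiPending, VINF_NEM_IPE_0);
        return VINF_SUCCESS;
    }

    int main(void)
    {
        printf("no SMI pending: %d\n", handleInterruptForceFlags(0));
        printf("SMI pending   : %d\n", handleInterruptForceFlags(1));
        return 0;
    }

In VBox status code conventions, VINF_* values are positive informational statuses while VERR_* values are negative errors, which is why these internal-processing-error codes sit in the informational range.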
trunk/include/VBox/vmm/cpumctx.h (r72178 → r72207)

  /** NEM/Win: Event injection (known was interruption) pending state. */
  #define CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT UINT64_C(0x0001000000000000)
+ /** NEM/Win: Inhibit maskable interrupts (VMCPU_FF_INHIBIT_INTERRUPTS). */
+ #define CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT UINT64_C(0x0002000000000000)
+ /** NEM/Win: Inhibit non-maskable interrupts (VMCPU_FF_BLOCK_NMIS). */
+ #define CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI UINT64_C(0x0002000000000000)
  /** NEM/Win: Mask. */
- #define CPUMCTX_EXTRN_NEM_WIN_MASK UINT64_C(0x0001000000000000)
+ #define CPUMCTX_EXTRN_NEM_WIN_MASK UINT64_C(0x0003000000000000)

  /** All CPUM state bits, not including keeper specific ones. */
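A rough sketch of the convention these bits plug into, for orientation only (the two constants are copied from the hunk above; everything else is illustrative and not VirtualBox source): a bit set in pCtx->fExtrn means that piece of guest state still lives in Hyper-V and must be imported before use, and importing it clears the bit.

    #include <stdint.h>
    #include <stdio.h>

    #define CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT UINT64_C(0x0001000000000000)
    #define CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT  UINT64_C(0x0002000000000000)

    int main(void)
    {
        /* Both pieces of state start out "external", i.e. still held by Hyper-V. */
        uint64_t fExtrn = CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;

        /* Import-if-needed pattern (cf. nemHCWinImportStateIfNeededStrict in the
           template file below): only talk to the hypervisor when a wanted bit is set. */
        uint64_t fWhat = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
        if (fExtrn & fWhat)
        {
            /* ...copy the interrupt-shadow state from Hyper-V here... */
            fExtrn &= ~fWhat;   /* the state is now present in the context */
        }

        printf("fExtrn after import: %#llx\n", (unsigned long long)fExtrn);
        return 0;
    }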
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h (r71297 → r72207)

  /// @todo WHvRegisterPendingEvent0
  /// @todo WHvRegisterPendingEvent1
+ /// @todo WHvX64RegisterDeliverabilityNotifications

  /*
…
  # else
      WHV_REGISTER_NAME aenmNames[128];
+     AssertFailed(); /** @todo this code is out of date! */

      /* GPRs */
…
   * @param pCtx The CPU context to import into.
   * @param fWhat What to import.
-  * @param pszCaller Who eis doing the importing.
+  * @param pszCaller Who is doing the importing.
  DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, const char *pszCaller)
…
  }
  # endif /* IN_RING0 */
+
+
+ /**
+  * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
+  *
+  * Unlike the wrapped APIs, this checks whether it's necessary.
+  *
+  * @returns VBox strict status code.
+  * @param pGVM The global (ring-0) VM structure.
+  * @param pGVCpu The global (ring-0) per CPU structure.
+  * @param pCtx The CPU context to import into.
+  * @param fWhat What to import.
+  * @param pszCaller Who is doing the importing.
+  */
+ DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx,
+                                                            uint64_t fWhat, const char *pszCaller)
+ {
+     if (pCtx->fExtrn & fWhat)
+     {
+ # ifdef IN_RING0
+         RT_NOREF(pVCpu);
+         return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, fWhat, pszCaller);
+ # else
+         RT_NOREF(pGVCpu, pszCaller);
+         int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
+         AssertRCReturn(rc, rc);
+ # endif
+     }
+     return VINF_SUCCESS;
+ }
+

  /**
…
   * ASSUMES no state copied yet.
   *
+  * @param pVCpu The cross context per CPU structure.
   * @param pCtx The registe rcontext.
   * @param pHdr The X64 intercept message header.
   */
- DECLINLINE(void) nemHCWinCopyStateFromX64Header(PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
- {
-     Assert(   (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS))
-            ==                 (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS));
+ DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
+ {
+     Assert(   (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
+            ==                 (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
      NEM_WIN_COPY_BACK_SEG(pCtx->cs, pHdr->CsSegment);
-     pCtx->rip = pHdr->Rip;
+     pCtx->rip = pHdr->Rip;
      pCtx->rflags.u = pHdr->Rflags;
-     pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
+
+     pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
+     if (!pHdr->ExecutionState.InterruptShadow)
+     {
+         if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+         { /* likely */ }
+         else
+             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+     }
+     else
+     {
+         EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
+         VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+     }
+
+     pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
  }
…

  /*
-  * Whatever we do, we must clear pending event ejection upon resume.
+  * Whatever we do, we must clear pending event injection upon resume.
   */
  if (pMsg->Header.ExecutionState.InterruptionPending)
-     pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_MASK;
+     pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;

  #if 0 /* Experiment: 20K -> 34K exit/s. */
…
   * Emulate the memory access, either access handler or special memory.
   */
- nemHCWinCopyStateFromX64Header(pCtx, &pMsg->Header);
+ nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
  VBOXSTRICTRC rcStrict;
  # ifdef IN_RING0
…
   */
  if (pMsg->Header.ExecutionState.InterruptionPending)
-     pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_MASK;
+     pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;

  VBOXSTRICTRC rcStrict;
…
  if (IOM_SUCCESS(rcStrict))
  {
-     nemHCWinCopyStateFromX64Header(pCtx, &pMsg->Header);
+     nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
      nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
  }
…
  pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
  Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pCtx->rax));
- nemHCWinCopyStateFromX64Header(pCtx, &pMsg->Header);
+ nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
  nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
  }
…
   * the opcode bytes for possible evil prefixes.
   */
- nemHCWinCopyStateFromX64Header(pCtx, &pMsg->Header);
+ nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
  pCtx->fExtrn &= ~(  CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
                    | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
…

  /**
+  * Deals with interrupt window message.
+  *
+  * @returns Strict VBox status code.
+  * @param pVM The cross context VM structure.
+  * @param pVCpu The cross context per CPU structure.
+  * @param pMsg The message.
+  * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
+  */
+ NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu,
+                                                                   HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg,
+                                                                   PCPUMCTX pCtx, PGVMCPU pGVCpu)
+ {
+     /*
+      * Assert message sanity.
+      */
+     Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
+            || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ   // READ & WRITE are probably not used here
+            || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
+     AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
+     AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
+
+     /*
+      * Just copy the state we've got and handle it in the loop for now.
+      */
+     nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
+     Log4(("IOIntW/%u: %04x:%08RX64: %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, pMsg->Type));
+
+     /** @todo call nemHCWinHandleInterruptFF */
+     RT_NOREF(pVM, pGVCpu);
+     return VINF_SUCCESS;
+ }
+
+
+ /**
   * Handles messages (VM exits).
   *
…
  case HvMessageTypeX64Halt:
      STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
+     Log4(("HaltExit\n"));
      return VINF_EM_HALT;

  case HvMessageTypeX64InterruptWindow:
-     AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n", pMsg->Header.MessageType),
-                                 VERR_INTERNAL_ERROR_2);
+     Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
+     STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
+     return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pCtx, pGVCpu);

  case HvMessageTypeInvalidVpRegisterValue:
…
  case HvMessageTypeUnsupportedFeature:
  case HvMessageTypeTlbPageSizeMismatch:
-     AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n", pMsg->Header.MessageType),
+     LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
+     AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
                                  VERR_INTERNAL_ERROR_2);
…
  case HvMessageTypeEventLogBufferComplete:
  case HvMessageTimerExpired:
+     LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
      AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: #x\n", pVCpu->idCpu, pMsg->Header.MessageType),
                                  VERR_INTERNAL_ERROR_2);

  default:
+     LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
      AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: #x\n", pVCpu->idCpu, pMsg->Header.MessageType),
                                  VERR_INTERNAL_ERROR_2);
…


+ /**
+  * Deals with pending interrupt related force flags, may inject interrupt.
+  *
+  * @returns VBox strict status code.
+  * @param pVM The cross context VM structure.
+  * @param pVCpu The cross context per CPU structure.
+  * @param pGVCpu The global (ring-0) per CPU structure.
+  * @param pCtx The register context.
+  * @param pfInterruptWindows Where to return interrupt window flags.
+  */
+ NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint8_t *pfInterruptWindows)
+ {
+     Assert(!TRPMHasTrap(pVCpu));
+     RT_NOREF_PV(pVM);
+
+     /*
+      * First update APIC.
+      */
+     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
+     {
+         APICUpdatePendingInterrupts(pVCpu);
+         if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
+                                  | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
+             return VINF_SUCCESS;
+     }
+
+     /*
+      * We don't currently implement SMIs.
+      */
+     AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VINF_NEM_IPE_0);
+
+     /*
+      * Check if we've got the minimum of state required for deciding whether we
+      * can inject interrupts and NMIs. If we don't have it, get all we might require
+      * for injection via IEM.
+      */
+     bool const fPendingNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+     uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
+                         | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
+     if (pCtx->fExtrn & fNeedExtrn)
+     {
+         VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IntFF");
+         if (rcStrict != VINF_SUCCESS)
+             return rcStrict;
+     }
+     bool const fInhibitInterrupts = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+                                  && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip;
+
+     /*
+      * NMI? Try deliver it first.
+      */
+     if (fPendingNmi)
+     {
+         if (   !fInhibitInterrupts
+             && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+         {
+             VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "NMI");
+             if (rcStrict == VINF_SUCCESS)
+             {
+                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+                 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
+                 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
+             }
+             return rcStrict;
+         }
+         *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
+     }
+
+     /*
+      * APIC or PIC interrupt?
+      */
+     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
+     {
+         if (   !fInhibitInterrupts
+             && pCtx->rflags.Bits.u1IF)
+         {
+             VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "NMI");
+             if (rcStrict == VINF_SUCCESS)
+             {
+                 uint8_t bInterrupt;
+                 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
+                 if (RT_SUCCESS(rc))
+                 {
+                     rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
+                     Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
+                 }
+                 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
+                 {
+                     *pfInterruptWindows |= (bInterrupt >> 4 /*??*/) << NEM_WIN_INTW_F_PRIO_SHIFT;
+                     Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
+                 }
+                 else
+                     Log8(("PDMGetInterrupt failed -> %d\n", rc));
+             }
+             return rcStrict;
+         }
+         *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
+     }
+
+     return VINF_SUCCESS;
+ }
+
+
  NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
  {
…
   */
  VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
- uint32_t cMillies = 5000; /** @todo lower this later... */
- const bool fSingleStepping = false; /** @todo get this from somewhere. */
- VBOXSTRICTRC rcStrict = VINF_SUCCESS;
- for (unsigned iLoop = 0;;iLoop++)
- {
+ uint32_t cMillies = 5000; /** @todo lower this later... */
+ const bool fSingleStepping = DBGFIsStepping(pVCpu);
+ // const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
+ //                            : VM_FF_HP_R0_PRE_HM_STEP_MASK;
+ // const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
+ VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+ for (unsigned iLoop = 0;; iLoop++)
+ {
+     /*
+      * Pending interrupts or such? Need to check and deal with this prior to the state syncing.
+      * Note! This may stop execution.
+      */
+     pVCpu->nem.s.fDesiredInterruptWindows = 0;
+     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
+                             | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
+     {
+         if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
+         {
+             pVCpu->nem.s.fHandleAndGetFlags = 0;
+             rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
+             if (rcStrict != VINF_SUCCESS)
+                 break;
+         }
+         rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, pCtx, &pVCpu->nem.s.fDesiredInterruptWindows);
+         if (rcStrict != VINF_SUCCESS)
+             break;
+     }
+
      /*
       * Ensure that hyper-V has the whole state.
       */
-     if ((pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)) != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
+     if (   (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
+         != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
+         || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
      {
  # ifdef IN_RING0
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp (r71296 → r72207)

  #include <VBox/vmm/em.h>
  #include <VBox/vmm/apic.h>
+ #include <VBox/vmm/pdm.h>
  #include "NEMInternal.h"
  #include <VBox/vmm/gvm.h>
…
      iReg++;
  }
- /// @todo HvRegisterInterruptState
+
+ /* Interruptibility state. This can get a little complicated since we get
+    half of the state via HV_X64_VP_EXECUTION_STATE. */
+ if (   (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
+     == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
+ {
+     HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
+     pInput->Elements[iReg].Name = HvRegisterInterruptState;
+     pInput->Elements[iReg].Value.Reg64 = 0;
+     if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+         && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
+         pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
+     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+         pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
+     iReg++;
+ }
+ else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
+ {
+     if (   pVCpu->nem.s.fLastInterruptShadow
+         || (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+             && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
+     {
+         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
+         pInput->Elements[iReg].Name = HvRegisterInterruptState;
+         pInput->Elements[iReg].Value.Reg64 = 0;
+         if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+             && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
+             pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
+         /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
+         //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+         //    pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
+         iReg++;
+     }
+ }
+ else
+     Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
+
+ /* Interrupt windows. */
+ uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
+ if (pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
+ {
+     pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
+     HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
+     pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
+     pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
+     Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
+     Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
+     Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
+     iReg++;
+ }
+
  /// @todo HvRegisterPendingEvent0
  /// @todo HvRegisterPendingEvent1
…
  }

+ /* Interruptibility. */
+ if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
+ {
+     pInput->Names[iReg++] = HvRegisterInterruptState;
+     pInput->Names[iReg++] = HvX64RegisterRip;
+ }
+
  /* event injection */
  pInput->Names[iReg++] = HvRegisterPendingInterruption;
- pInput->Names[iReg++] = HvRegisterInterruptState;
- pInput->Names[iReg++] = HvRegisterInterruptState;
  pInput->Names[iReg++] = HvRegisterPendingEvent0;
  pInput->Names[iReg++] = HvRegisterPendingEvent1;
…
  }

+ /* Interruptibility. */
+ if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
+ {
+     Assert(pInput->Names[iReg] == HvRegisterInterruptState);
+     Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
+
+     if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
+     {
+         pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
+         if (paValues[iReg].InterruptState.InterruptShadow)
+         {
+             EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
+             VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+         }
+         else
+             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+     }
+
+     if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
+     {
+         if (paValues[iReg].InterruptState.NmiMasked)
+             VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
+         else
+             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+     }
+
+     fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
+     iReg += 2;
+ }
+
  /* Event injection. */
  /// @todo HvRegisterPendingInterruption
…
  }

- /// @todo HvRegisterInterruptState
  /// @todo HvRegisterPendingEvent0
  /// @todo HvRegisterPendingEvent1
trunk/src/VBox/VMM/VMMR3/EMR3Nem.cpp (r71293 → r72207)

  {
      if (pCtx->eflags.Bits.u1VM)
-         Log(("NEMV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
+         Log(("NEMV86: %08x IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
      else if (CPUMIsGuestIn64BitCodeEx(pCtx))
-         Log(("NEMR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
+         Log(("NEMR%d: %04x:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
      else
-         Log(("NEMR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
+         Log(("NEMR%d: %04x:%08x ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
  }
  else
  {
      if (pCtx->eflags.Bits.u1VM)
-         Log(("NEMV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
+         Log(("NEMV86-CPU%d: %08x IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
      else if (CPUMIsGuestIn64BitCodeEx(pCtx))
-         Log(("NEMR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
+         Log(("NEMR%d-CPU%d: %04x:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
      else
-         Log(("NEMR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
+         Log(("NEMR%d-CPU%d: %04x:%08x ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
  }
  #endif /* LOG_ENABLED */
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp (r72188 → r72207)

  #include <VBox/vmm/em.h>
  #include <VBox/vmm/apic.h>
+ #include <VBox/vmm/pdm.h>
  #include "NEMInternal.h"
  #include <VBox/vmm/vm.h>
…
  STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", iCpu);
  STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", iCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitInterruptWindow", iCpu);
  STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", iCpu);
  STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", iCpu);
…
      AssertMsg(rcPgmPending == VINF_SUCCESS, ("rcPgmPending=%Rrc\n", VBOXSTRICTRC_VAL(rcPgmPending) ));
  }
+ LogFlow(("nemR3NativeRunGC: returns %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
  return rcStrict;
  #endif
trunk/src/VBox/VMM/include/NEMInternal.h (r71293 → r72207)

  /** The CPUMCTX_EXTRN_XXX mask for IEM. */
- # define NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM CPUMCTX_EXTRN_ALL
+ # define NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI)
+
+ /** @name Windows: Interrupt window flags (NEM_WIN_INTW_F_XXX).
+  * @{ */
+ # define NEM_WIN_INTW_F_NMI UINT8_C(0x01)
+ # define NEM_WIN_INTW_F_REGULAR UINT8_C(0x02)
+ # define NEM_WIN_INTW_F_PRIO_MASK UINT8_C(0x3c)
+ # define NEM_WIN_INTW_F_PRIO_SHIFT 2
+ /** @} */

  #endif /* RT_OS_WINDOWS */
…
  uint32_t u32Magic;
  #ifdef RT_OS_WINDOWS
+ /** The current state of the interrupt windows (NEM_WIN_INTW_F_XXX). */
+ uint8_t fCurrentInterruptWindows;
+ /** The desired state of the interrupt windows (NEM_WIN_INTW_F_XXX). */
+ uint8_t fDesiredInterruptWindows;
+ /** Last copy of HV_X64_VP_EXECUTION_STATE::InterruptShadow. */
+ bool fLastInterruptShadow : 1;
+ bool afPadding[1];
  # ifdef NEM_WIN_USE_OUR_OWN_RUN_API
  /** Pending VERR_NEM_CHANGE_PGM_MODE or VERR_NEM_FLUSH_TLB. */
…
  STAMCOUNTER StatExitMemIntercept;
  STAMCOUNTER StatExitHalt;
+ STAMCOUNTER StatExitInterruptWindow;
  STAMCOUNTER StatGetMsgTimeout;
  STAMCOUNTER StatStopCpuSuccess;
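The new NEM_WIN_INTW_F_* values pack the desired interrupt-window state into one byte: bit 0 requests an NMI window, bit 1 a regular interrupt window, and bits 2-5 carry the priority that the ring-0 code copies into HvX64RegisterDeliverabilityNotifications. A minimal sketch of the pack/unpack arithmetic follows, mirroring the (bInterrupt >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT and (f & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT expressions in the changeset; the example vector and the printing are illustrative only, not VirtualBox source.

    #include <stdint.h>
    #include <stdio.h>

    #define NEM_WIN_INTW_F_NMI        UINT8_C(0x01)
    #define NEM_WIN_INTW_F_REGULAR    UINT8_C(0x02)
    #define NEM_WIN_INTW_F_PRIO_MASK  UINT8_C(0x3c)
    #define NEM_WIN_INTW_F_PRIO_SHIFT 2

    int main(void)
    {
        /* Pack: request a regular interrupt window; the priority is derived from
           the top nibble of the pending vector, as in nemHCWinHandleInterruptFF. */
        uint8_t bVector = 0x61;                       /* hypothetical vector for illustration */
        uint8_t fIntWin = NEM_WIN_INTW_F_REGULAR
                        | (uint8_t)((bVector >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT);

        /* Unpack: recover the priority when filling the deliverability register. */
        uint8_t uPrio = (uint8_t)((fIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);

        printf("fIntWin=%#x nmi=%u regular=%u prio=%u\n",
               fIntWin, !!(fIntWin & NEM_WIN_INTW_F_NMI), !!(fIntWin & NEM_WIN_INTW_F_REGULAR), uPrio);
        return 0;
    }

Caching the last value written (fCurrentInterruptWindows versus fDesiredInterruptWindows in the per-CPU state above) lets the export path skip the HvX64RegisterDeliverabilityNotifications write whenever nothing changed.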