Changeset 80346 in vbox for trunk/src/VBox/VMM
Timestamp: Aug 19, 2019 7:36:29 PM (5 years ago)
Location:  trunk/src/VBox/VMM
Files:     9 edited
Legend:
    -  removed line
    +  added line
    …  unchanged context elided
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
(r80334 → r80346)

 # define NEMWIN_NEED_GET_REGISTER
 # if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
-#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
+#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
        do { \
            HV_REGISTER_VALUE TmpVal; \
-           nemHCWinGetRegister(a_pVCpu, a_pGVCpu, a_enmReg, &TmpVal); \
+           nemHCWinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
            AssertMsg(a_Expr, a_Msg); \
        } while (0)
 # else
-#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
+#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
        do { \
            WHV_REGISTER_VALUE TmpVal; \
…
 # endif
 #else
-# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
+# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
 #endif
…
  * Asserts the correctness of a 64-bit register value in a message/context.
  */
-#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_pGVCpu, a_enmReg, a_u64Val) \
-    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
+#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_enmReg, a_u64Val) \
+    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
                               (#a_u64Val "=%#RX64, expected %#RX64\n", (a_u64Val), TmpVal.Reg64))
 /** @def NEMWIN_ASSERT_MSG_REG_VAL
  * Asserts the correctness of a segment register value in a message/context.
  */
-#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_pGVCpu, a_enmReg, a_SReg) \
-    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, \
+#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_enmReg, a_SReg) \
+    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, \
                               (a_SReg).Base == TmpVal.Segment.Base \
                               && (a_SReg).Limit == TmpVal.Segment.Limit \
…
 #ifdef IN_RING0
     /** @todo optimize further, caller generally has the physical address. */
-    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
-    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
-    return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu],
+    return nemR0WinMapPages(pVM, pVCpu,
                             GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                             GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
…
 # ifdef IN_RING0
-    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
-    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
-    return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
+    return nemR0WinUnmapPages(pVM, pVCpu, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
 # else
     pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
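Every hunk above is the same mechanical change: a helper that used to take both the cross-context pointer and the ring-0 pointer now takes only one of them, because the other is reachable from it. Below is a self-contained C sketch of why the second parameter becomes redundant; all types and names in it are invented for illustration and are not VirtualBox definitions.

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative only: once the ring-0 per-CPU data (GVMCPU) is reachable
     * from the cross-context VMCPU, helpers that previously took both
     * pointers only need one of them. */
    typedef struct GVMCPU { unsigned idCpu; } GVMCPU;
    typedef struct VMCPU  { GVMCPU *pGVCpu; unsigned idCpu; } VMCPU;

    /* Before: two parameters that every caller must keep consistent. */
    static unsigned getIdBefore(VMCPU *pVCpu, GVMCPU *pGVCpu)
    {
        (void)pVCpu;
        return pGVCpu->idCpu;
    }

    /* After: one parameter; the ring-0 data is reached through it. */
    static unsigned getIdAfter(VMCPU *pVCpu)
    {
        return pVCpu->pGVCpu->idCpu;
    }

    int main(void)
    {
        GVMCPU GVCpu = { 3 };
        VMCPU  VCpu  = { &GVCpu, 3 };
        assert(getIdBefore(&VCpu, &GVCpu) == getIdAfter(&VCpu));
        printf("idCpu=%u\n", getIdAfter(&VCpu));
        return 0;
    }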
     ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
 #if 0 /** @todo these registers aren't available? Might explain something...*/
-    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
+    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
     if (enmCpuVendor != CPUMCPUVENDOR_AMD)
     {
…
 //#ifdef LOG_ENABLED
-//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
+//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
 //#endif
     if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
…
     AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);

-    pInput->PartitionId = pGVCpu->pGVM->nemr0.s.idHvPartition;
-    pInput->VpIndex     = pGVCpu->idCpu;
+    pInput->PartitionId = pVCpu->pGVM->nemr0.s.idHvPartition;
+    pInput->VpIndex     = pVCpu->idCpu;
     pInput->fFlags      = 0;
     pInput->Names[0]    = (HV_REGISTER_NAME)enmReg;
…
 /**
…
  * @param   pGVM       The global (ring-0) VM structure.
  * @param   pGVCpu     The global (ring-0) per CPU structure.
- * @param   pVCpu      The cross context per CPU structure.
  * @param   fWhat      What to import.
  * @param   pszCaller  Who is doing the importing.
  */
-DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu, uint64_t fWhat, const char *pszCaller)
+DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
 {
-    int rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
+    int rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
     if (RT_SUCCESS(rc))
     {
         Assert(rc == VINF_SUCCESS);
         return VINF_SUCCESS;
     }

     if (rc == VERR_NEM_FLUSH_TLB)
     {
         Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
         return -rc;
     }
     RT_NOREF(pszCaller);
     AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
 }
 #endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/
 #if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
 /**
  * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
  *
  * Unlike the wrapped APIs, this checks whether it's necessary.
  *
  * @returns VBox strict status code.
  * @param   pVCpu      The cross context per CPU structure.
  * @param   fWhat      What to import.
  * @param   pszCaller  Who is doing the importing.
  */
-DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPUCC pVCpu, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
+DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPUCC pVCpu, uint64_t fWhat, const char *pszCaller)
 {
     if (pVCpu->cpum.GstCtx.fExtrn & fWhat)
     {
 # ifdef IN_RING0
-        return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, fWhat, pszCaller);
+        return nemR0WinImportStateStrict(pVCpu->pGVM, pVCpu, fWhat, pszCaller);
 # else
-        RT_NOREF(pGVCpu, pszCaller);
+        RT_NOREF(pszCaller);
         int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
         AssertRCReturn(rc, rc);
…
 /**
…
  * @param   pVCpu   The cross context per CPU structure.
  * @param   pMsg    The message.
- * @param   pGVCpu  The global (ring-0) per CPU structure (NULL in r3).
  * @sa      nemR3WinHandleExitMemory
  */
 NEM_TMPL_STATIC VBOXSTRICTRC
-nemHCWinHandleMessageMemory(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
+nemHCWinHandleMessageMemory(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg)
 {
     uint64_t const uHostTsc = ASMReadTSC();
…
     VBOXSTRICTRC rcStrict;
 # ifdef IN_RING0
-    rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu,
+    rcStrict = nemR0WinImportStateStrict(pVM, pVCpu,
                                          NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
     if (rcStrict != VINF_SUCCESS)
…
     rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
     AssertRCReturn(rc, rc);
-    NOREF(pGVCpu);
 # endif
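nemR0WinImportStateStrict shows a convention worth noting: a "strict" wrapper normalizes a raw status, passing success through as VINF_SUCCESS, turning the one expected error (VERR_NEM_FLUSH_TLB) into a positive informational status by negation, and asserting on anything else. Here is a rough standalone sketch of that convention; the status values are made up and are not the real VBox codes.

    #include <assert.h>
    #include <stdio.h>

    /* Stand-ins for VINF_SUCCESS / VERR_NEM_FLUSH_TLB: in VBox conventions,
     * negative values are errors and small positive values are
     * informational ("strict") statuses. */
    #define MY_VINF_SUCCESS    0
    #define MY_VERR_FLUSH_TLB  (-1000)

    static int importStateStrict(int rcRaw)
    {
        if (rcRaw >= 0)
            return MY_VINF_SUCCESS;   /* plain success */
        if (rcRaw == MY_VERR_FLUSH_TLB)
            return -rcRaw;            /* expected: negate into an informational status */
        assert(!"unexpected status"); /* anything else is a bug */
        return rcRaw;
    }

    int main(void)
    {
        printf("%d %d\n", importStateStrict(0), importStateStrict(MY_VERR_FLUSH_TLB));
        return 0;
    }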
 /**
…
  * @param   pVCpu   The cross context per CPU structure.
  * @param   pMsg    The message.
- * @param   pGVCpu  The global (ring-0) per CPU structure (NULL in r3).
  */
 NEM_TMPL_STATIC VBOXSTRICTRC
-nemHCWinHandleMessageIoPort(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
+nemHCWinHandleMessageIoPort(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg)
 {
     /*
…
     Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
            || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
-    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
+    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
     if (pMsg->AccessInfo.StringOp)
     {
-        NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterDs, pMsg->DsSegment);
-        NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterEs, pMsg->EsSegment);
-        NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
-        NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
-        NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
+        NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterDs, pMsg->DsSegment);
+        NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterEs, pMsg->EsSegment);
+        NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
+        NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
+        NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
     }
…
     pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
 # ifdef IN_RING0
-    rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
+    rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
…
     int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
     AssertRCReturn(rc, rc);
-    RT_NOREF(pGVCpu);
 # endif
…
 # ifdef IN_RING0
-    rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
+    rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
…
     int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
     AssertRCReturn(rc, rc);
-    RT_NOREF(pGVCpu);
 # endif
 /**
…
  * @param   pVCpu   The cross context per CPU structure.
  * @param   pMsg    The message.
- * @param   pGVCpu  The global (ring-0) per CPU structure (NULL in r3).
  * @sa      nemR3WinHandleExitInterruptWindow
  */
 NEM_TMPL_STATIC VBOXSTRICTRC
-nemHCWinHandleMessageInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg, PGVMCPU pGVCpu)
+nemHCWinHandleMessageInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg)
 {
     /*
…
     /** @todo call nemHCWinHandleInterruptFF */
-    RT_NOREF(pVM, pGVCpu);
+    RT_NOREF(pVM);
     return VINF_SUCCESS;
 }
…
 /**
…
  * @param   pVCpu   The cross context per CPU structure.
  * @param   pMsg    The message.
- * @param   pGVCpu  The global (ring-0) per CPU structure (NULL in r3).
  * @sa      nemR3WinHandleExitCpuId
  */
-NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg,
-                                                        PGVMCPU pGVCpu)
+NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg)
 {
     /* Check message register value sanity. */
-    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);
+    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);

     /* Do exit history. */
…
                     pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
 # ifdef IN_RING0
-    VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
+    VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
…
     int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
     AssertRCReturn(rc, rc);
-    RT_NOREF(pGVCpu);
 # endif
     VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
 /**
…
  * @param   pVCpu   The cross context per CPU structure.
  * @param   pMsg    The message.
- * @param   pGVCpu  The global (ring-0) per CPU structure (NULL in r3).
  * @sa      nemR3WinHandleExitMsr
  */
-NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPUCC pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
+NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPUCC pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg)
 {
     /*
…
     Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
            || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
-    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
+    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);

     /*
…
     nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
-    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
+    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
                                                  (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
                                                  | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
…
     /*
      * If we get down here, we're supposed to #GP(0).
      */
-    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
+    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
     if (rcStrict == VINF_SUCCESS)
     {
…
                      pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
     nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
-    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
+    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
                                                  (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
                                                  | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
…
     /*
      * If we get down here, we're supposed to #GP(0).
      */
-    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
-                                                 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
+    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
     if (rcStrict == VINF_SUCCESS)
     {
 /**
…
  * @param   pVCpu   The cross context per CPU structure.
  * @param   pMsg    The message.
- * @param   pGVCpu  The global (ring-0) per CPU structure (NULL in r3).
  * @sa      nemR3WinHandleExitMsr
  */
 NEM_TMPL_STATIC VBOXSTRICTRC
-nemHCWinHandleMessageException(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
+nemHCWinHandleMessageException(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg)
 {
     /*
…
            || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
            || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
-    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
-    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterDs, pMsg->DsSegment);
-    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterSs, pMsg->SsSegment);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsp, pMsg->Rsp);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbp, pMsg->Rbp);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR8, pMsg->R8);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR9, pMsg->R9);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR10, pMsg->R10);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR11, pMsg->R11);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR12, pMsg->R12);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR13, pMsg->R13);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR14, pMsg->R14);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR15, pMsg->R15);
+    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
+    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterDs, pMsg->DsSegment);
+    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterSs, pMsg->SsSegment);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsp, pMsg->Rsp);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbp, pMsg->Rbp);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR8, pMsg->R8);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR9, pMsg->R9);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR10, pMsg->R10);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR11, pMsg->R11);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR12, pMsg->R12);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR13, pMsg->R13);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR14, pMsg->R14);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR15, pMsg->R15);

     /*
…
     if (pMsg->ExceptionVector == X86_XCPT_DB)
         fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
-    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, fWhat, "Xcpt");
+    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
…
     if (pExit->VpException.ExceptionType == X86_XCPT_DB)
         fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
-    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, fWhat, "Xcpt");
+    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
…
 /**
…
  * @param   pVCpu    The cross context per CPU structure.
  * @param   pMsgHdr  The message header.
- * @param   pGVCpu   The global (ring-0) per CPU structure (NULL in r3).
  * @sa      nemR3WinHandleExitUnrecoverableException
  */
 NEM_TMPL_STATIC VBOXSTRICTRC
-nemHCWinHandleMessageUnrecoverableException(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, PGVMCPU pGVCpu)
+nemHCWinHandleMessageUnrecoverableException(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
 {
     /* Check message register value sanity. */
-    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsgHdr->Rip);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
-    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);
+    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsgHdr->Rip);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
+    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);

 # if 0
…
                      pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
     nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
-    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
-                                                              NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
+    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
     if (rcStrict == VINF_SUCCESS)
     {
…
                      pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
     nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
-    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
-                                                              NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
+    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
     if (rcStrict == VINF_SUCCESS)
     {
 /**
…
  * @param   pVCpu           The cross context per CPU structure.
  * @param   pMappingHeader  The message slot mapping.
- * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
  * @sa      nemR3WinHandleExit
  */
 NEM_TMPL_STATIC VBOXSTRICTRC
-nemHCWinHandleMessage(PVMCC pVM, PVMCPUCC pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader, PGVMCPU pGVCpu)
+nemHCWinHandleMessage(PVMCC pVM, PVMCPUCC pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
 {
     if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
…
             case HvMessageTypeUnmappedGpa:
                 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
                 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
-                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pGVCpu);
+                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);

             case HvMessageTypeGpaIntercept:
                 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
                 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
-                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pGVCpu);
+                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);

             case HvMessageTypeX64IoPortIntercept:
                 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
                 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
-                return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pGVCpu);
+                return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept);

             case HvMessageTypeX64Halt:
…
             case HvMessageTypeX64InterruptWindow:
                 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
                 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
-                return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pGVCpu);
+                return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow);

             case HvMessageTypeX64CpuidIntercept:
                 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
                 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
-                return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept, pGVCpu);
+                return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept);

             case HvMessageTypeX64MsrIntercept:
                 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
                 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
-                return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept, pGVCpu);
+                return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept);

             case HvMessageTypeX64ExceptionIntercept:
                 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
                 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
-                return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept, pGVCpu);
+                return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept);

             case HvMessageTypeUnrecoverableException:
                 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
                 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
-                return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pGVCpu);
+                return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader);

             case HvMessageTypeInvalidVpRegisterValue:
…
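The dispatcher asserts each message's payload size before handing the payload to the type-specific handler. A schematic C sketch of that switch-on-type-with-size-check shape follows; all types and names are invented for illustration and do not match the real HV_MESSAGE layout.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative message envelope: a type tag, a declared payload size,
     * and the payload itself, loosely mirroring the pattern above. */
    typedef enum { MSG_IO_PORT, MSG_HALT } MsgType;
    typedef struct { uint16_t uPort; uint8_t cbAccess; } IoPortPayload;
    typedef struct { MsgType enmType; uint32_t cbPayload; IoPortPayload IoPort; } Message;

    static int handleIoPort(IoPortPayload const *p)
    {
        printf("io port %#x\n", p->uPort);
        return 0;
    }

    static int handleMessage(Message const *pMsg)
    {
        switch (pMsg->enmType)
        {
            case MSG_IO_PORT:
                /* Sanity-check the payload size before trusting the payload. */
                assert(pMsg->cbPayload == sizeof(IoPortPayload));
                return handleIoPort(&pMsg->IoPort);
            case MSG_HALT:
                return 1; /* leave the run loop */
            default:
                assert(!"unknown message type");
                return -1;
        }
    }

    int main(void)
    {
        Message Msg = { MSG_IO_PORT, sizeof(IoPortPayload), { 0x80, 1 } };
        return handleMessage(&Msg) == 0 ? 0 : 1;
    }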
 /**
…
  * @returns NT status code.
  * @param   pGVM      The ring-0 VM structure.
- * @param   pGVCpu    The ring-0 CPU structure.
- * @param   pVCpu     The calling cross context CPU structure.
+ * @param   pGVCpu    The global (ring-0) per CPU structure.
  * @param   fFlags    The wait flags.
  * @param   cMillies  The timeout in milliseconds
  */
-static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu,
-                                                               uint32_t fFlags, uint32_t cMillies)
+static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, uint32_t fFlags, uint32_t cMillies)
 {
-    pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
-    pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags;
-    pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
-    NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
-                                            &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
+    pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
+    pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags;
+    pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
+    NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
+                                            &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
                                             pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
                                             NULL, 0);
…
         || rcNt == STATUS_USER_APC /* just in case */)
     {
-        DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
-        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingAlerts);
+        DBGFTRACE_CUSTOM(pGVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
+        STAM_REL_COUNTER_INC(&pGVCpu->nem.s.StatStopCpuPendingAlerts);
         Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);

-        pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pVCpu->idCpu;
-        pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
-        pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
-        rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
-                                       &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
+        pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
+        pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
+        pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
+        rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
+                                       &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
                                        pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
                                        NULL, 0);
-        DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
+        DBGFTRACE_CUSTOM(pGVM, "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
     }
     return rcNt;
 }
 #endif /* IN_RING0 */
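The noteworthy wrinkle in this helper is the restart path: if the wait is interrupted (STATUS_ALERTED or STATUS_USER_APC), it retries once with VID_MSHAGN_F_HANDLE_MESSAGE masked out so a message that was already handled is not handled twice. A minimal standalone sketch of that shape follows; the status codes and flag values are stand-ins, not the NT or VID definitions.

    #include <stdio.h>

    /* Stand-ins for NTSTATUS codes and the VID_MSHAGN_F_* flags. */
    #define ST_SUCCESS   0
    #define ST_ALERTED   1
    #define F_HANDLE_MSG 0x1
    #define F_GET_NEXT   0x2

    /* Hypothetical wait: pretend the first call is interrupted by an alert. */
    static int waitForMessage(unsigned fFlags)
    {
        static int s_cCalls = 0;
        printf("wait(flags=%#x)\n", fFlags);
        return s_cCalls++ == 0 ? ST_ALERTED : ST_SUCCESS;
    }

    static int waitWithRestart(unsigned fFlags)
    {
        int rc = waitForMessage(fFlags);
        if (rc == ST_ALERTED && (fFlags & F_GET_NEXT))
            /* Retry, but never re-handle a message we already handled. */
            rc = waitForMessage(fFlags & ~F_HANDLE_MSG);
        return rc;
    }

    int main(void)
    {
        return waitWithRestart(F_HANDLE_MSG | F_GET_NEXT);
    }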
 /**
…
  * @param   pMappingHeader  The message slot mapping.
- * @param   pGVM            The global (ring-0) VM structure (NULL in r3).
- * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
  */
 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVMCC pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict,
-                                             VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
-                                             PGVM pGVM, PGVMCPU pGVCpu)
+                                             VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
 {
 # ifdef DBGFTRACE_ENABLED
…
     DBGFTRACE_CUSTOM(pVM, "nemStop#0");
 # ifdef IN_RING0
-    pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
-    NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction,
+    pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
+    NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction,
                                             &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
                                             NULL, 0);
…
         return rcStrict;
     }
-    RT_NOREF(pGVM, pGVCpu);
 # endif
…
 # ifdef IN_RING0
-    rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
+    rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
     DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
                      pMsgForTrace->Header.MessageType);
…
     if (enmVidMsgType != VidMessageStopRequestComplete)
     {
-        VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pGVCpu);
+        VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
         if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
             rcStrict = rcStrict2;
…
 # ifdef IN_RING0
-        rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu,
+        rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu,
                                                               VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
                                                               30000 /*ms*/);
…
 # ifdef IN_RING0
-    rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
+    rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
     DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType,
                      pMsgForTrace->Header.MessageType, pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
…
 /**
…
  * @param   pVM                 The cross context VM structure.
  * @param   pVCpu               The cross context per CPU structure.
- * @param   pGVCpu              The global (ring-0) per CPU structure.
  * @param   pfInterruptWindows  Where to return interrupt window flags.
  */
-NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVMCC pVM, PVMCPUCC pVCpu, PGVMCPU pGVCpu, uint8_t *pfInterruptWindows)
+NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVMCC pVM, PVMCPUCC pVCpu, uint8_t *pfInterruptWindows)
 {
     Assert(!TRPMHasTrap(pVCpu));
…
     if (pVCpu->cpum.GstCtx.fExtrn & fNeedExtrn)
     {
-        VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
-                                                                  NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
+        VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     {
-        VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
-                                                                  NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
+        VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
         if (rcStrict == VINF_SUCCESS)
         {
…
         AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
-        VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
-                                                                  NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
+        VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
         if (rcStrict == VINF_SUCCESS)
         {
…
 /**
…
  * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context per CPU structure.
- * @param   pGVM    The ring-0 VM structure (NULL in ring-3).
- * @param   pGVCpu  The ring-0 per CPU structure (NULL in ring-3).
  */
-NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVMCC pVM, PVMCPUCC pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
+NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVMCC pVM, PVMCPUCC pVCpu)
 {
     LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
…
     if (LogIs3Enabled())
         nemHCWinLogState(pVM, pVCpu);
 # endif
-# ifdef IN_RING0
-    Assert(pVCpu->idCpu == pGVCpu->idCpu);
-# endif
…
     {
         pVCpu->nem.s.fHandleAndGetFlags = 0;
-        rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
+        rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
         if (rcStrict == VINF_SUCCESS)
         { /* likely */ }
…
     /* Try inject interrupt. */
-    rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
+    rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
     if (rcStrict == VINF_SUCCESS)
     { /* likely */ }
…
 # ifdef IN_RING0
-    int rc2 = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
+    int rc2 = nemR0WinExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx);
 # else
     int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
-    RT_NOREF(pGVM, pGVCpu);
 # endif
     AssertRCReturn(rc2, rc2);
…
     {
 # ifdef IN_RING0
-        pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
-        NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction,
+        pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
+        NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction,
                                                 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
                                                 NULL, 0);
         LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
-        AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt),
+        AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pVCpu->idCpu, rcNt),
                               VERR_NEM_IPE_5);
 # else
…
     cMsWait = RT_MS_1SEC;
 # ifdef IN_RING0
-    pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
+    pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pVCpu->idCpu;
     pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = pVCpu->nem.s.fHandleAndGetFlags;
     pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMsWait;
-    NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
+    NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
                                             &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
-                                            pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
+                                            pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
                                             NULL, 0);
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
…
 # ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
-    rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pGVCpu);
+    rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
     pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
 # else
…
     {
         pVCpu->nem.s.fHandleAndGetFlags = 0;
-        rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
+        rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
     }
 # endif
…
     {
 # ifdef IN_RING0
-        int rc2 = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT,
+        int rc2 = nemR0WinImportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT,
                                       true /*fCanUpdateCr3*/);
         if (RT_SUCCESS(rc2))
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
(r80334 → r80346)

 /**
…
  * @param   pGVM          The global (ring-0) VM structure.
- * @param   pVM           The cross context VM structure.
  * @param   idCpu         The VCPU id - must be zero.
  * @param   cBasePages    The number of pages that may be allocated for the base RAM and ROMs.
…
  * @thread  The creator thread / EMT(0).
  */
-GMMR0DECL(int) GMMR0InitialReservation(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages,
+GMMR0DECL(int) GMMR0InitialReservation(PGVM pGVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages,
                                        uint32_t cFixedPages, GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
 {
-    LogFlow(("GMMR0InitialReservation: pGVM=%p pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
-             pGVM, pVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));
+    LogFlow(("GMMR0InitialReservation: pGVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
+             pGVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
…
 /**
…
  * @returns see GMMR0InitialReservation.
  * @param   pGVM   The global (ring-0) VM structure.
- * @param   pVM    The cross context VM structure.
  * @param   idCpu  The VCPU id.
  * @param   pReq   Pointer to the request packet.
  */
-GMMR0DECL(int) GMMR0InitialReservationReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq)
+GMMR0DECL(int) GMMR0InitialReservationReq(PGVM pGVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq)
 {
     /*
      * Validate input and pass it on.
      */
-    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
+    AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
     AssertPtrReturn(pReq, VERR_INVALID_POINTER);
     AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);

-    return GMMR0InitialReservation(pGVM, pVM, idCpu, pReq->cBasePages, pReq->cShadowPages,
+    return GMMR0InitialReservation(pGVM, idCpu, pReq->cBasePages, pReq->cShadowPages,
                                    pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
 }
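The recurring validation change in this file is GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu) collapsing to GVMMR0ValidateGVMandEMT(pGVM, idCpu), since one handle now carries everything. A toy C sketch of what such a handle-plus-EMT check typically does; the fields, magic value and thread helper are invented for illustration and are not the real GVMM internals.

    #include <stdint.h>

    #define MY_VINF_SUCCESS           0
    #define MY_VERR_INVALID_HANDLE  (-1)
    #define MY_VERR_NOT_OWNER       (-2)

    typedef struct GVMCPU { uintptr_t hNativeThread; } GVMCPU;
    typedef struct GVM { uint32_t u32Magic, cCpus; GVMCPU aCpus[4]; } GVM;
    #define GVM_MAGIC 0x19330825u

    /* Stand-in for RTThreadNativeSelf(). */
    static uintptr_t nativeThreadSelf(void) { return 42; }

    /* One pointer now carries everything the old pGVM/pVM pair did. */
    static int validateGVMandEMT(GVM *pGVM, uint32_t idCpu)
    {
        if (!pGVM || pGVM->u32Magic != GVM_MAGIC || idCpu >= pGVM->cCpus)
            return MY_VERR_INVALID_HANDLE;
        /* Only the EMT that owns this virtual CPU may make the call. */
        if (pGVM->aCpus[idCpu].hNativeThread != nativeThreadSelf())
            return MY_VERR_NOT_OWNER;
        return MY_VINF_SUCCESS;
    }

    int main(void)
    {
        GVM Vm = { GVM_MAGIC, 1, { { 42 } } };
        return validateGVMandEMT(&Vm, 0);
    }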
…
 /**
…
  * @param   pGVM         The global (ring-0) VM structure.
- * @param   pVM          The cross context VM structure.
  * @param   idCpu        The VCPU id.
  * @param   cBasePages   The number of pages that may be allocated for the base RAM and ROMs.
…
  * @thread  EMT(idCpu)
  */
-GMMR0DECL(int) GMMR0UpdateReservation(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t cBasePages,
+GMMR0DECL(int) GMMR0UpdateReservation(PGVM pGVM, VMCPUID idCpu, uint64_t cBasePages,
                                       uint32_t cShadowPages, uint32_t cFixedPages)
 {
-    LogFlow(("GMMR0UpdateReservation: pGVM=%p pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x\n",
-             pGVM, pVM, cBasePages, cShadowPages, cFixedPages));
+    LogFlow(("GMMR0UpdateReservation: pGVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x\n",
+             pGVM, cBasePages, cShadowPages, cFixedPages));
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
…
 /**
…
  * @returns see GMMR0UpdateReservation.
  * @param   pGVM   The global (ring-0) VM structure.
- * @param   pVM    The cross context VM structure.
  * @param   idCpu  The VCPU id.
  * @param   pReq   Pointer to the request packet.
  */
-GMMR0DECL(int) GMMR0UpdateReservationReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq)
+GMMR0DECL(int) GMMR0UpdateReservationReq(PGVM pGVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq)
 {
     /*
      * Validate input and pass it on.
      */
-    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
     AssertPtrReturn(pReq, VERR_INVALID_POINTER);
     AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);

-    return GMMR0UpdateReservation(pGVM, pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
+    return GMMR0UpdateReservation(pGVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
 }
…
 /**
…
  * @param   pGVM            The global (ring-0) VM structure.
- * @param   pVM             The cross context VM structure.
  * @param   idCpu           The VCPU id.
  * @param   cPagesToUpdate  The number of pages to update (starting from the head).
…
  * @thread  EMT(idCpu)
  */
-GMMR0DECL(int) GMMR0AllocateHandyPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t cPagesToUpdate,
+GMMR0DECL(int) GMMR0AllocateHandyPages(PGVM pGVM, VMCPUID idCpu, uint32_t cPagesToUpdate,
                                        uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
 {
-    LogFlow(("GMMR0AllocateHandyPages: pGVM=%p pVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
-             pGVM, pVM, cPagesToUpdate, cPagesToAlloc, paPages));
+    LogFlow(("GMMR0AllocateHandyPages: pGVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
+             pGVM, cPagesToUpdate, cPagesToAlloc, paPages));
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
…
 /**
…
  * @param   pGVM        The global (ring-0) VM structure.
- * @param   pVM         The cross context VM structure.
  * @param   idCpu       The VCPU id.
  * @param   cPages      The number of pages to allocate.
…
  * @thread  EMT.
  */
-GMMR0DECL(int) GMMR0AllocatePages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
+GMMR0DECL(int) GMMR0AllocatePages(PGVM pGVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
 {
-    LogFlow(("GMMR0AllocatePages: pGVM=%p pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pGVM, pVM, cPages, paPages, enmAccount));
+    LogFlow(("GMMR0AllocatePages: pGVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pGVM, cPages, paPages, enmAccount));
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
…
 /**
…
  * @returns see GMMR0AllocatePages.
  * @param   pGVM   The global (ring-0) VM structure.
- * @param   pVM    The cross context VM structure.
  * @param   idCpu  The VCPU id.
  * @param   pReq   Pointer to the request packet.
  */
-GMMR0DECL(int) GMMR0AllocatePagesReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq)
+GMMR0DECL(int) GMMR0AllocatePagesReq(PGVM pGVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq)
 {
     /*
…
                     VERR_INVALID_PARAMETER);

-    return GMMR0AllocatePages(pGVM, pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
+    return GMMR0AllocatePages(pGVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
 }
…
 /**
…
  * @param   pGVM     The global (ring-0) VM structure.
- * @param   pVM      The cross context VM structure.
  * @param   idCpu    The VCPU id.
  * @param   cbPage   Large page size.
…
  * @param   pHCPhys  Where to return the host physical address of the page.
  */
-GMMR0DECL(int) GMMR0AllocateLargePage(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
+GMMR0DECL(int) GMMR0AllocateLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
 {
-    LogFlow(("GMMR0AllocateLargePage: pGVM=%p pVM=%p cbPage=%x\n", pGVM, pVM, cbPage));
+    LogFlow(("GMMR0AllocateLargePage: pGVM=%p cbPage=%x\n", pGVM, cbPage));

     AssertReturn(cbPage == GMM_CHUNK_SIZE, VERR_INVALID_PARAMETER);
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
…
 /**
…
  * @returns VBox status code:
  * @param   pGVM    The global (ring-0) VM structure.
- * @param   pVM     The cross context VM structure.
  * @param   idCpu   The VCPU id.
  * @param   idPage  The large page id.
  */
-GMMR0DECL(int) GMMR0FreeLargePage(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t idPage)
+GMMR0DECL(int) GMMR0FreeLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t idPage)
 {
-    LogFlow(("GMMR0FreeLargePage: pGVM=%p pVM=%p idPage=%x\n", pGVM, pVM, idPage));
+    LogFlow(("GMMR0FreeLargePage: pGVM=%p idPage=%x\n", pGVM, idPage));
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
 /**
…
  * @returns see GMMR0FreeLargePage.
  * @param   pGVM   The global (ring-0) VM structure.
- * @param   pVM    The cross context VM structure.
  * @param   idCpu  The VCPU id.
  * @param   pReq   Pointer to the request packet.
  */
-GMMR0DECL(int) GMMR0FreeLargePageReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq)
+GMMR0DECL(int) GMMR0FreeLargePageReq(PGVM pGVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq)
 {
     /*
…
                     VERR_INVALID_PARAMETER);

-    return GMMR0FreeLargePage(pGVM, pVM, idCpu, pReq->idPage);
+    return GMMR0FreeLargePage(pGVM, idCpu, pReq->idPage);
 }
…
 /**
…
  * @param   pGVM        The global (ring-0) VM structure.
- * @param   pVM         The cross context VM structure.
  * @param   idCpu       The VCPU id.
  * @param   cPages      The number of pages to allocate.
…
  * @thread  EMT.
  */
-GMMR0DECL(int) GMMR0FreePages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
+GMMR0DECL(int) GMMR0FreePages(PGVM pGVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
 {
-    LogFlow(("GMMR0FreePages: pGVM=%p pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pGVM, pVM, cPages, paPages, enmAccount));
+    LogFlow(("GMMR0FreePages: pGVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pGVM, cPages, paPages, enmAccount));
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
…
 /**
…
  * @returns see GMMR0FreePages.
  * @param   pGVM   The global (ring-0) VM structure.
- * @param   pVM    The cross context VM structure.
  * @param   idCpu  The VCPU id.
  * @param   pReq   Pointer to the request packet.
  */
-GMMR0DECL(int) GMMR0FreePagesReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq)
+GMMR0DECL(int) GMMR0FreePagesReq(PGVM pGVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq)
 {
     /*
…
                     VERR_INVALID_PARAMETER);

-    return GMMR0FreePages(pGVM, pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
+    return GMMR0FreePages(pGVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
 }
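The ...Req entry points in this file all follow the same validate-and-forward shape: check the request pointer, check the declared header size against the expected structure size, then unpack the fields and call the worker. A self-contained C sketch of that pattern follows; the request layout is illustrative, not the real VBox request header.

    #include <stdint.h>
    #include <stdio.h>

    #define MY_VINF_SUCCESS            0
    #define MY_VERR_INVALID_POINTER  (-1)
    #define MY_VERR_INVALID_PARAM    (-2)

    /* Illustrative request packet: a size-checked header plus payload. */
    typedef struct { uint32_t cbReq; } ReqHdr;
    typedef struct { ReqHdr Hdr; uint64_t cPages; } AllocReq;

    static int allocWorker(uint64_t cPages)
    {
        printf("alloc %llu pages\n", (unsigned long long)cPages);
        return MY_VINF_SUCCESS;
    }

    /* The Req wrapper trusts nothing: the pointer and the declared size
     * are checked before the payload is unpacked for the worker. */
    static int allocReq(AllocReq *pReq)
    {
        if (!pReq)
            return MY_VERR_INVALID_POINTER;
        if (pReq->Hdr.cbReq != sizeof(*pReq))
            return MY_VERR_INVALID_PARAM;
        return allocWorker(pReq->cPages);
    }

    int main(void)
    {
        AllocReq Req = { { sizeof(AllocReq) }, 16 };
        return allocReq(&Req);
    }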
…
 /**
…
  * @param   pGVM             The global (ring-0) VM structure.
- * @param   pVM              The cross context VM structure.
  * @param   idCpu            The VCPU id.
  * @param   enmAction        Inflate/deflate/reset.
…
  * @thread  EMT(idCpu)
  */
-GMMR0DECL(int) GMMR0BalloonedPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
+GMMR0DECL(int) GMMR0BalloonedPages(PGVM pGVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
 {
-    LogFlow(("GMMR0BalloonedPages: pGVM=%p pVM=%p enmAction=%d cBalloonedPages=%#x\n",
-             pGVM, pVM, enmAction, cBalloonedPages));
+    LogFlow(("GMMR0BalloonedPages: pGVM=%p enmAction=%d cBalloonedPages=%#x\n",
+             pGVM, enmAction, cBalloonedPages));

     AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
…
 /**
…
  * @returns see GMMR0BalloonedPages.
  * @param   pGVM   The global (ring-0) VM structure.
- * @param   pVM    The cross context VM structure.
  * @param   idCpu  The VCPU id.
  * @param   pReq   Pointer to the request packet.
  */
-GMMR0DECL(int) GMMR0BalloonedPagesReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq)
+GMMR0DECL(int) GMMR0BalloonedPagesReq(PGVM pGVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq)
 {
     /*
…
                     VERR_INVALID_PARAMETER);

-    return GMMR0BalloonedPages(pGVM, pVM, idCpu, pReq->enmAction, pReq->cBalloonedPages);
+    return GMMR0BalloonedPages(pGVM, idCpu, pReq->enmAction, pReq->cBalloonedPages);
 }
…
 /**
…
  * @returns VBox status code.
  * @param   pGVM   The global (ring-0) VM structure.
- * @param   pVM    The cross context VM structure.
  * @param   idCpu  Cpu id.
  * @param   pReq   Pointer to the request packet.
…
  * @thread  EMT(idCpu)
  */
-GMMR0DECL(int) GMMR0QueryMemoryStatsReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq)
+GMMR0DECL(int) GMMR0QueryMemoryStatsReq(PGVM pGVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq)
 {
     /*
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
…
 /**
…
  * @returns VBox status code.
  * @param   pGVM          The global (ring-0) VM structure.
- * @param   pVM           The cross context VM structure.
  * @param   idChunkMap    The chunk to map. NIL_GMM_CHUNKID if nothing to map.
  * @param   idChunkUnmap  The chunk to unmap. NIL_GMM_CHUNKID if nothing to unmap.
…
  * @thread  EMT ???
  */
-GMMR0DECL(int) GMMR0MapUnmapChunk(PGVM pGVM, PVMCC pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
+GMMR0DECL(int) GMMR0MapUnmapChunk(PGVM pGVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
 {
-    LogFlow(("GMMR0MapUnmapChunk: pGVM=%p pVM=%p idChunkMap=%#x idChunkUnmap=%#x ppvR3=%p\n",
-             pGVM, pVM, idChunkMap, idChunkUnmap, ppvR3));
+    LogFlow(("GMMR0MapUnmapChunk: pGVM=%p idChunkMap=%#x idChunkUnmap=%#x ppvR3=%p\n",
+             pGVM, idChunkMap, idChunkUnmap, ppvR3));
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
+    int rc = GVMMR0ValidateGVM(pGVM);
     if (RT_FAILURE(rc))
         return rc;
…
 /**
…
  * @returns see GMMR0MapUnmapChunk.
  * @param   pGVM  The global (ring-0) VM structure.
- * @param   pVM   The cross context VM structure.
  * @param   pReq  Pointer to the request packet.
  */
-GMMR0DECL(int) GMMR0MapUnmapChunkReq(PGVM pGVM, PVMCC pVM, PGMMMAPUNMAPCHUNKREQ pReq)
+GMMR0DECL(int) GMMR0MapUnmapChunkReq(PGVM pGVM, PGMMMAPUNMAPCHUNKREQ pReq)
 {
     /*
…
     AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);

-    return GMMR0MapUnmapChunk(pGVM, pVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
+    return GMMR0MapUnmapChunk(pGVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
 }
…
 /**
…
  * @returns VBox status code.
  * @param   pGVM   The global (ring-0) VM structure.
- * @param   pVM    The cross context VM structure.
  * @param   idCpu  The VCPU id.
  * @param   pvR3   Pointer to the chunk size memory block to lock down.
  */
-GMMR0DECL(int) GMMR0SeedChunk(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, RTR3PTR pvR3)
+GMMR0DECL(int) GMMR0SeedChunk(PGVM pGVM, VMCPUID idCpu, RTR3PTR pvR3)
 {
     /*
…
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
     if (RT_FAILURE(rc))
         return rc;
… … 4541 4521 * @thread EMT(idCpu) 4542 4522 */ 4543 GMMR0DECL(int) GMMR0RegisterSharedModule(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName,4523 GMMR0DECL(int) GMMR0RegisterSharedModule(PGVM pGVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, 4544 4524 char *pszVersion, RTGCPTR GCPtrModBase, uint32_t cbModule, 4545 4525 uint32_t cRegions, struct VMMDEVSHAREDREGIONDESC const *paRegions) … … 4554 4534 PGMM pGMM; 4555 4535 GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE); 4556 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);4536 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 4557 4537 if (RT_FAILURE(rc)) 4558 4538 return rc; … … 4666 4646 #else 4667 4647 4668 NOREF(pGVM); NOREF( pVM); NOREF(idCpu); NOREF(enmGuestOS); NOREF(pszModuleName); NOREF(pszVersion);4648 NOREF(pGVM); NOREF(idCpu); NOREF(enmGuestOS); NOREF(pszModuleName); NOREF(pszVersion); 4669 4649 NOREF(GCPtrModBase); NOREF(cbModule); NOREF(cRegions); NOREF(paRegions); 4670 4650 return VERR_NOT_IMPLEMENTED; … … 4678 4658 * @returns see GMMR0RegisterSharedModule. 4679 4659 * @param pGVM The global (ring-0) VM structure. 4680 * @param pVM The cross context VM structure.4681 4660 * @param idCpu The VCPU id. 4682 4661 * @param pReq Pointer to the request packet. 4683 4662 */ 4684 GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)4663 GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PGVM pGVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq) 4685 4664 { 4686 4665 /* … … 4693 4672 4694 4673 /* Pass back return code in the request packet to preserve informational codes. (VMMR3CallR0 chokes on them) */ 4695 pReq->rc = GMMR0RegisterSharedModule(pGVM, pVM,idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion,4674 pReq->rc = GMMR0RegisterSharedModule(pGVM, idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion, 4696 4675 pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions); 4697 4676 return VINF_SUCCESS; … … 4704 4683 * @returns VBox status code. 4705 4684 * @param pGVM The global (ring-0) VM structure. 4706 * @param pVM The cross context VM structure.4707 4685 * @param idCpu The VCPU id. 4708 4686 * @param pszModuleName The module name. … … 4711 4689 * @param cbModule The module size. 4712 4690 */ 4713 GMMR0DECL(int) GMMR0UnregisterSharedModule(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, char *pszModuleName, char *pszVersion,4691 GMMR0DECL(int) GMMR0UnregisterSharedModule(PGVM pGVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, 4714 4692 RTGCPTR GCPtrModBase, uint32_t cbModule) 4715 4693 { … … 4720 4698 PGMM pGMM; 4721 4699 GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE); 4722 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);4700 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 4723 4701 if (RT_FAILURE(rc)) 4724 4702 return rc; … … 4763 4741 #else 4764 4742 4765 NOREF(pGVM); NOREF( pVM); NOREF(idCpu); NOREF(pszModuleName); NOREF(pszVersion); NOREF(GCPtrModBase); NOREF(cbModule);4743 NOREF(pGVM); NOREF(idCpu); NOREF(pszModuleName); NOREF(pszVersion); NOREF(GCPtrModBase); NOREF(cbModule); 4766 4744 return VERR_NOT_IMPLEMENTED; 4767 4745 #endif … … 4774 4752 * @returns see GMMR0UnregisterSharedModule. 4775 4753 * @param pGVM The global (ring-0) VM structure. 4776 * @param pVM The cross context VM structure.4777 4754 * @param idCpu The VCPU id. 4778 4755 * @param pReq Pointer to the request packet. 
4779 4756 */ 4780 GMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)4757 GMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PGVM pGVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq) 4781 4758 { 4782 4759 /* … … 4786 4763 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER); 4787 4764 4788 return GMMR0UnregisterSharedModule(pGVM, pVM,idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);4765 return GMMR0UnregisterSharedModule(pGVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule); 4789 4766 } 4790 4767 … … 5081 5058 * @returns VBox status code. 5082 5059 * @param pGVM The global (ring-0) VM structure. 5083 * @param pVM The cross context VM structure.5084 5060 * @param idCpu The VCPU id. 5085 5061 */ 5086 GMMR0DECL(int) GMMR0ResetSharedModules(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)5062 GMMR0DECL(int) GMMR0ResetSharedModules(PGVM pGVM, VMCPUID idCpu) 5087 5063 { 5088 5064 #ifdef VBOX_WITH_PAGE_SHARING … … 5092 5068 PGMM pGMM; 5093 5069 GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE); 5094 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);5070 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 5095 5071 if (RT_FAILURE(rc)) 5096 5072 return rc; … … 5118 5094 return rc; 5119 5095 #else 5120 RT_NOREF(pGVM, pVM,idCpu);5096 RT_NOREF(pGVM, idCpu); 5121 5097 return VERR_NOT_IMPLEMENTED; 5122 5098 #endif … … 5150 5126 * @returns VBox status code. 5151 5127 * @param pGVM The global (ring-0) VM structure. 5152 * @param pVM The cross context VM structure.5153 5128 * @param idCpu The calling EMT number. 5154 5129 * @thread EMT(idCpu) 5155 5130 */ 5156 GMMR0DECL(int) GMMR0CheckSharedModules(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)5131 GMMR0DECL(int) GMMR0CheckSharedModules(PGVM pGVM, VMCPUID idCpu) 5157 5132 { 5158 5133 #ifdef VBOX_WITH_PAGE_SHARING … … 5162 5137 PGMM pGMM; 5163 5138 GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE); 5164 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);5139 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 5165 5140 if (RT_FAILURE(rc)) 5166 5141 return rc; … … 5195 5170 return rc; 5196 5171 #else 5197 RT_NOREF(pGVM, pVM,idCpu);5172 RT_NOREF(pGVM, idCpu); 5198 5173 return VERR_NOT_IMPLEMENTED; 5199 5174 #endif … … 5252 5227 * @returns VBox status code. 5253 5228 * @param pGVM The global (ring-0) VM structure. 5254 * @param pVM The cross context VM structure.5255 5229 * @param pReq Pointer to the request packet. 5256 5230 */ 5257 GMMR0DECL(int) GMMR0FindDuplicatePageReq(PGVM pGVM, P VMCC pVM, PGMMFINDDUPLICATEPAGEREQ pReq)5231 GMMR0DECL(int) GMMR0FindDuplicatePageReq(PGVM pGVM, PGMMFINDDUPLICATEPAGEREQ pReq) 5258 5232 { 5259 5233 /* … … 5266 5240 GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE); 5267 5241 5268 int rc = GVMMR0ValidateGVM andVM(pGVM, pVM);5242 int rc = GVMMR0ValidateGVM(pGVM); 5269 5243 if (RT_FAILURE(rc)) 5270 5244 return rc; … … 5325 5299 * @param pSession The current session. 5326 5300 * @param pGVM The GVM to obtain statistics for. Optional. 5327 * @param pVM The VM structure corresponding to @a pGVM. 
5328 */ 5329 GMMR0DECL(int) GMMR0QueryStatistics(PGMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM, PVMCC pVM) 5330 { 5331 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p pVM=%p\n", pStats, pSession, pGVM, pVM)); 5301 */ 5302 GMMR0DECL(int) GMMR0QueryStatistics(PGMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM) 5303 { 5304 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM)); 5332 5305 5333 5306 /* … … 5347 5320 if (pGVM) 5348 5321 { 5349 rc = GVMMR0ValidateGVM andVM(pGVM, pVM);5322 rc = GVMMR0ValidateGVM(pGVM); 5350 5323 if (RT_FAILURE(rc)) 5351 5324 return rc; … … 5390 5363 * @returns see GMMR0QueryStatistics. 5391 5364 * @param pGVM The global (ring-0) VM structure. Optional. 5392 * @param pVM The cross context VM structure. Optional.5393 5365 * @param pReq Pointer to the request packet. 5394 5366 */ 5395 GMMR0DECL(int) GMMR0QueryStatisticsReq(PGVM pGVM, P VMCC pVM, PGMMQUERYSTATISTICSSREQ pReq)5367 GMMR0DECL(int) GMMR0QueryStatisticsReq(PGVM pGVM, PGMMQUERYSTATISTICSSREQ pReq) 5396 5368 { 5397 5369 /* … … 5401 5373 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER); 5402 5374 5403 return GMMR0QueryStatistics(&pReq->Stats, pReq->pSession, pGVM , pVM);5375 return GMMR0QueryStatistics(&pReq->Stats, pReq->pSession, pGVM); 5404 5376 } 5405 5377 … … 5414 5386 * @param pSession The current session. 5415 5387 * @param pGVM The GVM to reset statistics for. Optional. 5416 * @param pVM The VM structure corresponding to @a pGVM. 5417 */ 5418 GMMR0DECL(int) GMMR0ResetStatistics(PCGMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM, PVMCC pVM) 5419 { 5420 NOREF(pStats); NOREF(pSession); NOREF(pVM); NOREF(pGVM); 5388 */ 5389 GMMR0DECL(int) GMMR0ResetStatistics(PCGMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM) 5390 { 5391 NOREF(pStats); NOREF(pSession); NOREF(pGVM); 5421 5392 /* Currently nothing we can reset at the moment. */ 5422 5393 return VINF_SUCCESS; … … 5429 5400 * @returns see GMMR0ResetStatistics. 5430 5401 * @param pGVM The global (ring-0) VM structure. Optional. 5431 * @param pVM The cross context VM structure. Optional.5432 5402 * @param pReq Pointer to the request packet. 5433 5403 */ 5434 GMMR0DECL(int) GMMR0ResetStatisticsReq(PGVM pGVM, P VMCC pVM, PGMMRESETSTATISTICSSREQ pReq)5404 GMMR0DECL(int) GMMR0ResetStatisticsReq(PGVM pGVM, PGMMRESETSTATISTICSSREQ pReq) 5435 5405 { 5436 5406 /* … … 5440 5410 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER); 5441 5411 5442 return GMMR0ResetStatistics(&pReq->Stats, pReq->pSession, pGVM , pVM);5443 } 5444 5412 return GMMR0ResetStatistics(&pReq->Stats, pReq->pSession, pGVM); 5413 } 5414 -
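The same transformation repeats across every GMMR0 entry point above: the redundant PVMCC parameter disappears and validation funnels through the slimmer GVMMR0ValidateGVM / GVMMR0ValidateGVMandEMT helpers, since the global and cross context VM structures now refer to one and the same object. A minimal sketch of how a ring-0 caller adapts, assuming only the post-change signatures shown in this diff; the wrapper name gmmR0SampleInflateBalloon is hypothetical, for illustration only:

#include <VBox/vmm/gmm.h>
#include <VBox/vmm/gvmm.h>

/* Hypothetical ring-0 helper showing the new call shape: only pGVM and the
   calling EMT's CPU id travel down the stack, no separate pVM pointer. */
static int gmmR0SampleInflateBalloon(PGVM pGVM, VMCPUID idCpu, uint32_t cPages)
{
    /* One combined handle + EMT check replaces GVMMR0ValidateGVMandVMandEMT. */
    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    if (RT_FAILURE(rc))
        return rc;

    /* Before this change the call would have been
       GMMR0BalloonedPages(pGVM, pVM, idCpu, GMMBALLOONACTION_INFLATE, cPages). */
    return GMMR0BalloonedPages(pGVM, idCpu, GMMBALLOONACTION_INFLATE, cPages);
}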
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r80336 r80346 119 119 } \ 120 120 } while (0) 121 # define GVMM_CHECK_SMAP_CHECK2(a_p VM, a_BadExpr) \121 # define GVMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \ 122 122 do { \ 123 123 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \ … … 128 128 else \ 129 129 { \ 130 SUPR0BadContext((a_p VM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \130 SUPR0BadContext((a_pGVM) ? (a_pGVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \ 131 131 a_BadExpr; \ 132 132 } \ … … 136 136 # define GVMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0 137 137 # define GVMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures) 138 # define GVMM_CHECK_SMAP_CHECK2(a_p VM, a_BadExpr) NOREF(fKernelFeatures)138 # define GVMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures) 139 139 #endif 140 140 … … 159 159 /** The pointer to the ring-0 only (aka global) VM structure. */ 160 160 PGVM pGVM; 161 /** The ring-0 mapping of the shared VM instance data. */ 162 PVMCC pVM; 161 /** The ring-0 mapping of the shared VM instance data. 162 * @todo remove this */ 163 PVMCC pVMRemoveMe; 163 164 /** The virtual machine object. */ 164 165 void *pvObj; … … 361 362 static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession); 362 363 static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle); 363 static int gvmmR0ByGVM andVM(PGVM pGVM, PVMCC pVM, PGVMM *ppGVMM, bool fTakeUsedLock);364 static int gvmmR0ByGVMand VMandEMT(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGVMM *ppGVMM);364 static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock); 365 static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM); 365 366 366 367 #ifdef GVMM_SCHED_WITH_PPT … … 528 529 PGVMM pGVMM = g_pGVMM; 529 530 g_pGVMM = NULL; 530 if (RT_UNLIKELY(! VALID_PTR(pGVMM)))531 if (RT_UNLIKELY(!RT_VALID_PTR(pGVMM))) 531 532 { 532 533 SUPR0Printf("GVMMR0Term: pGVMM=%RKv\n", pGVMM); … … 779 780 * Validate the request. 780 781 */ 781 if (! VALID_PTR(pReq))782 if (!RT_VALID_PTR(pReq)) 782 783 return VERR_INVALID_POINTER; 783 784 if (pReq->Hdr.cbReq != sizeof(*pReq)) … … 789 790 * Execute it. 790 791 */ 791 P VMCC pVM;792 PGVM pGVM; 792 793 pReq->pVMR0 = NULL; 793 794 pReq->pVMR3 = NIL_RTR3PTR; 794 int rc = GVMMR0CreateVM(pSession, pReq->cCpus, &p VM);795 int rc = GVMMR0CreateVM(pSession, pReq->cCpus, &pGVM); 795 796 if (RT_SUCCESS(rc)) 796 797 { 797 pReq->pVMR0 = p VM;798 pReq->pVMR3 = p VM->pVMR3;798 pReq->pVMR0 = pGVM; /** @todo don't expose this to ring-3, use a unique random number instead. */ 799 pReq->pVMR3 = pGVM->pVMR3; 799 800 } 800 801 return rc; … … 810 811 * @param pSession The support driver session. 811 812 * @param cCpus Number of virtual CPUs for the new VM. 812 * @param pp VMWhere to store the pointer to the VM structure.813 * @param ppGVM Where to store the pointer to the VM structure. 813 814 * 814 815 * @thread EMT. 815 816 */ 816 GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCpus, P VMCC *ppVM)817 GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCpus, PGVM *ppGVM) 817 818 { 818 819 LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession)); … … 820 821 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE); 821 822 822 AssertPtrReturn(pp VM, VERR_INVALID_POINTER);823 *pp VM = NULL;823 AssertPtrReturn(ppGVM, VERR_INVALID_POINTER); 824 *ppGVM = NULL; 824 825 825 826 if ( cCpus == 0 … … 857 858 858 859 /* consistency checks, a bit paranoid as always. 
*/ 859 if ( !pHandle->pVM 860 if ( !pHandle->pVMRemoveMe 860 861 && !pHandle->pGVM 861 862 && !pHandle->pvObj … … 876 877 pGVMM->cVMs++; 877 878 878 pHandle->pVM 879 pHandle->pVMRemoveMe = NULL; 879 880 pHandle->pGVM = NULL; 880 881 pHandle->pSession = pSession; … … 967 968 AssertRC(rc); 968 969 969 pHandle->pVM 970 pHandle->pVMRemoveMe = pGVM; 970 971 pHandle->pGVM = pGVM; 971 972 pHandle->hEMT0 = hEMT0; … … 994 995 CPUMR0RegisterVCpuThread(&pGVM->aCpus[0]); 995 996 996 *pp VM = pGVM;997 *ppGVM = pGVM; 997 998 Log(("GVMMR0CreateVM: pVMR3=%p pGVM=%p hGVM=%d\n", pVMR3, pGVM, iHandle)); 998 999 return VINF_SUCCESS; … … 1192 1193 * @returns VBox status code. 1193 1194 * @param pGVM The global (ring-0) VM structure. 1194 * @param pVM The cross context VM structure.1195 1195 * 1196 1196 * @thread EMT(0) if it's associated with the VM, otherwise any thread. 1197 1197 */ 1198 GVMMR0DECL(int) GVMMR0DestroyVM(PGVM pGVM , PVMCC pVM)1199 { 1200 LogFlow(("GVMMR0DestroyVM: pGVM=%p pVM=%p\n", pGVM, pVM));1198 GVMMR0DECL(int) GVMMR0DestroyVM(PGVM pGVM) 1199 { 1200 LogFlow(("GVMMR0DestroyVM: pGVM=%p\n", pGVM)); 1201 1201 PGVMM pGVMM; 1202 1202 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE); … … 1206 1206 */ 1207 1207 AssertPtrReturn(pGVM, VERR_INVALID_POINTER); 1208 AssertPtrReturn(pVM, VERR_INVALID_POINTER); 1209 AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER); 1210 AssertReturn(pGVM == pVM, VERR_INVALID_POINTER); 1211 AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState), 1208 AssertReturn(!((uintptr_t)pGVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER); 1209 AssertMsgReturn(pGVM->enmVMState >= VMSTATE_CREATING && pGVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pGVM->enmVMState), 1212 1210 VERR_WRONG_ORDER); 1213 1211 … … 1218 1216 1219 1217 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM]; 1220 AssertReturn(pHandle->p VM == pVM, VERR_NOT_OWNER);1218 AssertReturn(pHandle->pGVM == pGVM, VERR_NOT_OWNER); 1221 1219 1222 1220 RTPROCESS ProcId = RTProcSelf(); … … 1235 1233 1236 1234 /* Be careful here because we might theoretically be racing someone else cleaning up. 
*/ 1237 if ( pHandle->p VM == pVM1235 if ( pHandle->pGVM == pGVM 1238 1236 && ( ( pHandle->hEMT0 == hSelf 1239 1237 && pHandle->ProcId == ProcId) 1240 1238 || pHandle->hEMT0 == NIL_RTNATIVETHREAD) 1241 && VALID_PTR(pHandle->pvObj)1242 && VALID_PTR(pHandle->pSession)1243 && VALID_PTR(pHandle->pGVM)1239 && RT_VALID_PTR(pHandle->pvObj) 1240 && RT_VALID_PTR(pHandle->pSession) 1241 && RT_VALID_PTR(pHandle->pGVM) 1244 1242 && pHandle->pGVM->u32Magic == GVM_MAGIC) 1245 1243 { … … 1265 1263 else 1266 1264 { 1267 SUPR0Printf("GVMMR0DestroyVM: pHandle=%RKv:{.p VM=%p, .hEMT0=%p, .ProcId=%u, .pvObj=%p} pVM=%p hSelf=%p\n",1268 pHandle, pHandle->p VM, pHandle->hEMT0, pHandle->ProcId, pHandle->pvObj, pVM, hSelf);1265 SUPR0Printf("GVMMR0DestroyVM: pHandle=%RKv:{.pGVM=%p, .hEMT0=%p, .ProcId=%u, .pvObj=%p} pGVM=%p hSelf=%p\n", 1266 pHandle, pHandle->pGVM, pHandle->hEMT0, pHandle->ProcId, pHandle->pvObj, pGVM, hSelf); 1269 1267 gvmmR0CreateDestroyUnlock(pGVMM); 1270 1268 rc = VERR_GVMM_IPE_2; … … 1289 1287 { 1290 1288 LogFlow(("gvmmR0CleanupVM: Calling VMMR0TermVM\n")); 1291 VMMR0TermVM(pGVM, pGVM,NIL_VMCPUID);1289 VMMR0TermVM(pGVM, NIL_VMCPUID); 1292 1290 } 1293 1291 else … … 1399 1397 */ 1400 1398 PGVM pGVM = pHandle->pGVM; 1401 if ( VALID_PTR(pGVM)1399 if ( RT_VALID_PTR(pGVM) 1402 1400 && pGVM->u32Magic == GVM_MAGIC) 1403 1401 { … … 1465 1463 pGVMM->iFreeHead = iHandle; 1466 1464 ASMAtomicWriteNullPtr(&pHandle->pGVM); 1467 ASMAtomicWriteNullPtr(&pHandle->pVM );1465 ASMAtomicWriteNullPtr(&pHandle->pVMRemoveMe); 1468 1466 ASMAtomicWriteNullPtr(&pHandle->pvObj); 1469 1467 ASMAtomicWriteNullPtr(&pHandle->pSession); … … 1484 1482 * @returns VBox status code 1485 1483 * @param pGVM The global (ring-0) VM structure. 1486 * @param pVM The cross context VM structure.1487 1484 * @param idCpu VCPU id to register the current thread as. 1488 1485 */ 1489 GVMMR0DECL(int) GVMMR0RegisterVCpu(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)1486 GVMMR0DECL(int) GVMMR0RegisterVCpu(PGVM pGVM, VMCPUID idCpu) 1490 1487 { 1491 1488 AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION); … … 1495 1492 */ 1496 1493 PGVMM pGVMM; 1497 int rc = gvmmR0ByGVM andVM(pGVM, pVM, &pGVMM, false /* fTakeUsedLock */); /** @todo take lock here. */1494 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /* fTakeUsedLock */); /** @todo take lock here. */ 1498 1495 if (RT_SUCCESS(rc)) 1499 1496 { … … 1540 1537 * @returns VBox status code 1541 1538 * @param pGVM The global (ring-0) VM structure. 1542 * @param pVM The cross context VM structure.1543 1539 * @param idCpu VCPU id to register the current thread as. 1544 1540 */ 1545 GVMMR0DECL(int) GVMMR0DeregisterVCpu(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)1541 GVMMR0DECL(int) GVMMR0DeregisterVCpu(PGVM pGVM, VMCPUID idCpu) 1546 1542 { 1547 1543 AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION); … … 1551 1547 */ 1552 1548 PGVMM pGVMM; 1553 int rc = gvmmR0ByGVMand VMandEMT(pGVM, pVM, idCpu, &pGVMM);1549 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM); 1554 1550 if (RT_SUCCESS(rc)) 1555 1551 { … … 1606 1602 */ 1607 1603 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM]; 1608 AssertPtrReturn(pHandle->pVM , NULL);1604 AssertPtrReturn(pHandle->pVMRemoveMe, NULL); 1609 1605 AssertPtrReturn(pHandle->pvObj, NULL); 1610 1606 PGVM pGVM = pHandle->pGVM; 1611 1607 AssertPtrReturn(pGVM, NULL); 1612 AssertReturn(pGVM == pHandle->pVM, NULL); 1613 1614 return pHandle->pGVM; 1615 } 1616 1617 1618 /** 1619 * Lookup a GVM structure by the shared VM structure. 
1608 1609 return pGVM; 1610 } 1611 1612 1613 /** 1614 * Check that the given GVM and VM structures match up. 1620 1615 * 1621 1616 * The calling thread must be in the same process as the VM. All current lookups … … 1623 1618 * 1624 1619 * @returns VBox status code. 1625 * @param pVM The cross context VM structure. 1626 * @param ppGVM Where to store the GVM pointer. 1620 * @param pGVM The global (ring-0) VM structure. 1627 1621 * @param ppGVMM Where to store the pointer to the GVMM instance data. 1628 1622 * @param fTakeUsedLock Whether to take the used lock or not. We take it in … … 1632 1626 * possible that the VM will disappear then! 1633 1627 * 1634 * @remark This will not assert on an invalid pVM but try return silently. 1635 */ 1636 static int gvmmR0ByVM(PVMCC pVM, PGVM *ppGVM, PGVMM *ppGVMM, bool fTakeUsedLock) 1637 { 1638 RTPROCESS ProcId = RTProcSelf(); 1639 PGVMM pGVMM; 1640 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE); 1641 1642 /* 1643 * Validate. 1644 */ 1645 if (RT_UNLIKELY( !VALID_PTR(pVM) 1646 || ((uintptr_t)pVM & PAGE_OFFSET_MASK))) 1647 return VERR_INVALID_POINTER; 1648 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING 1649 || pVM->enmVMState >= VMSTATE_TERMINATED)) 1650 return VERR_INVALID_POINTER; 1651 1652 uint16_t hGVM = pVM->hSelf; 1653 ASMCompilerBarrier(); 1654 if (RT_UNLIKELY( hGVM == NIL_GVM_HANDLE 1655 || hGVM >= RT_ELEMENTS(pGVMM->aHandles))) 1656 return VERR_INVALID_HANDLE; 1657 1658 /* 1659 * Look it up. 1660 */ 1661 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM]; 1662 PGVM pGVM; 1663 if (fTakeUsedLock) 1664 { 1665 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM); 1666 AssertRCReturn(rc, rc); 1667 1668 pGVM = pHandle->pGVM; 1669 if (RT_UNLIKELY( pHandle->pVM != pVM 1670 || pHandle->ProcId != ProcId 1671 || !VALID_PTR(pHandle->pvObj) 1672 || !VALID_PTR(pGVM) 1673 || pGVM != pVM)) 1674 { 1675 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 1676 return VERR_INVALID_HANDLE; 1677 } 1678 } 1679 else 1680 { 1681 if (RT_UNLIKELY(pHandle->pVM != pVM)) 1682 return VERR_INVALID_HANDLE; 1683 if (RT_UNLIKELY(pHandle->ProcId != ProcId)) 1684 return VERR_INVALID_HANDLE; 1685 if (RT_UNLIKELY(!VALID_PTR(pHandle->pvObj))) 1686 return VERR_INVALID_HANDLE; 1687 1688 pGVM = pHandle->pGVM; 1689 if (RT_UNLIKELY(!VALID_PTR(pGVM))) 1690 return VERR_INVALID_HANDLE; 1691 if (RT_UNLIKELY(pGVM != pVM)) 1692 return VERR_INVALID_HANDLE; 1693 } 1694 1695 *ppGVM = pGVM; 1696 *ppGVMM = pGVMM; 1697 return VINF_SUCCESS; 1698 } 1699 1700 1701 /** 1702 * Fast look up a GVM structure by the cross context VM structure. 1703 * 1704 * This is mainly used a glue function, so performance is . 1705 * 1706 * @returns GVM on success, NULL on failure. 1707 * @param pVM The cross context VM structure. ASSUMES to be 1708 * reasonably valid, so we can do fewer checks than in 1709 * gvmmR0ByVM. 1710 * 1711 * @note Do not use this on pVM structures from userland! 1712 */ 1713 GVMMR0DECL(PGVM) GVMMR0FastGetGVMByVM(PVMCC pVM) 1714 { 1715 AssertPtr(pVM); 1716 Assert(!((uintptr_t)pVM & PAGE_OFFSET_MASK)); 1717 1718 PGVMM pGVMM; 1719 GVMM_GET_VALID_INSTANCE(pGVMM, NULL); 1720 1721 /* 1722 * Validate. 1723 */ 1724 uint16_t hGVM = pVM->hSelf; 1725 ASMCompilerBarrier(); 1726 AssertReturn(hGVM != NIL_GVM_HANDLE && hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL); 1727 1728 /* 1729 * Look it up and check pVM against the value in the handle and GVM structures. 
1730 */ 1731 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM]; 1732 AssertReturn(pHandle->pVM == pVM, NULL); 1733 1734 PGVM pGVM = pHandle->pGVM; 1735 AssertPtrReturn(pGVM, NULL); 1736 AssertReturn(pGVM == pVM, NULL); 1737 1738 return pGVM; 1739 } 1740 1741 1742 /** 1743 * Check that the given GVM and VM structures match up. 1744 * 1745 * The calling thread must be in the same process as the VM. All current lookups 1746 * are by threads inside the same process, so this will not be an issue. 1747 * 1748 * @returns VBox status code. 1749 * @param pGVM The global (ring-0) VM structure. 1750 * @param pVM The cross context VM structure. 1751 * @param ppGVMM Where to store the pointer to the GVMM instance data. 1752 * @param fTakeUsedLock Whether to take the used lock or not. We take it in 1753 * shared mode when requested. 1754 * 1755 * Be very careful if not taking the lock as it's 1756 * possible that the VM will disappear then! 1757 * 1758 * @remark This will not assert on an invalid pVM but try return silently. 1759 */ 1760 static int gvmmR0ByGVMandVM(PGVM pGVM, PVMCC pVM, PGVMM *ppGVMM, bool fTakeUsedLock) 1628 * @remark This will not assert on an invalid pGVM but try return silently. 1629 */ 1630 static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock) 1761 1631 { 1762 1632 /* … … 1764 1634 */ 1765 1635 int rc; 1766 if (RT_LIKELY(RT_VALID_PTR(pGVM))) 1767 { 1768 if (RT_LIKELY( RT_VALID_PTR(pVM) 1769 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0)) 1770 { 1771 if (RT_LIKELY(pGVM == pVM)) 1636 if (RT_LIKELY( RT_VALID_PTR(pGVM) 1637 && ((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0 )) 1638 { 1639 /* 1640 * Get the pGVMM instance and check the VM handle. 1641 */ 1642 PGVMM pGVMM; 1643 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE); 1644 1645 uint16_t hGVM = pGVM->hSelf; 1646 if (RT_LIKELY( hGVM != NIL_GVM_HANDLE 1647 && hGVM < RT_ELEMENTS(pGVMM->aHandles))) 1648 { 1649 RTPROCESS const pidSelf = RTProcSelf(); 1650 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM]; 1651 if (fTakeUsedLock) 1652 { 1653 rc = GVMMR0_USED_SHARED_LOCK(pGVMM); 1654 AssertRCReturn(rc, rc); 1655 } 1656 1657 if (RT_LIKELY( pHandle->pGVM == pGVM 1658 && pHandle->ProcId == pidSelf 1659 && RT_VALID_PTR(pHandle->pvObj))) 1772 1660 { 1773 1661 /* 1774 * Get the pGVMM instance and check the VM handle.1662 * Some more VM data consistency checks. 1775 1663 */ 1776 PGVMM pGVMM; 1777 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE); 1778 1779 uint16_t hGVM = pGVM->hSelf; 1780 if (RT_LIKELY( hGVM != NIL_GVM_HANDLE 1781 && hGVM < RT_ELEMENTS(pGVMM->aHandles))) 1664 if (RT_LIKELY( pGVM->cCpusUnsafe == pGVM->cCpus 1665 && pGVM->hSelfUnsafe == hGVM 1666 && pGVM->pSelf == pGVM)) 1782 1667 { 1783 RTPROCESS const pidSelf = RTProcSelf(); 1784 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM]; 1785 if (fTakeUsedLock) 1668 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING 1669 && pGVM->enmVMState <= VMSTATE_TERMINATED)) 1786 1670 { 1787 rc = GVMMR0_USED_SHARED_LOCK(pGVMM);1788 AssertRCReturn(rc, rc);1671 *ppGVMM = pGVMM; 1672 return VINF_SUCCESS; 1789 1673 } 1790 1791 if (RT_LIKELY( pHandle->pGVM == pGVM 1792 && pHandle->pVM == pVM 1793 && pHandle->ProcId == pidSelf 1794 && RT_VALID_PTR(pHandle->pvObj))) 1795 { 1796 /* 1797 * Some more VM data consistency checks. 
1798 */ 1799 if (RT_LIKELY( pVM->cCpus == pGVM->cCpus 1800 && pVM->hSelf == hGVM 1801 && pVM->enmVMState >= VMSTATE_CREATING 1802 && pVM->enmVMState <= VMSTATE_TERMINATED 1803 && pVM->pSelf == pVM 1804 )) 1805 { 1806 *ppGVMM = pGVMM; 1807 return VINF_SUCCESS; 1808 } 1809 } 1810 1811 if (fTakeUsedLock) 1812 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 1674 rc = VERR_INCONSISTENT_VM_HANDLE; 1813 1675 } 1676 else 1677 rc = VERR_INCONSISTENT_VM_HANDLE; 1814 1678 } 1679 else 1680 rc = VERR_INVALID_VM_HANDLE; 1681 1682 if (fTakeUsedLock) 1683 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 1684 } 1685 else 1815 1686 rc = VERR_INVALID_VM_HANDLE; 1816 }1817 else1818 rc = VERR_INVALID_POINTER;1819 1687 } 1820 1688 else … … 1825 1693 1826 1694 /** 1695 * Validates a GVM/VM pair. 1696 * 1697 * @returns VBox status code. 1698 * @param pGVM The global (ring-0) VM structure. 1699 */ 1700 GVMMR0DECL(int) GVMMR0ValidateGVM(PGVM pGVM) 1701 { 1702 PGVMM pGVMM; 1703 return gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/); 1704 } 1705 1706 1707 /** 1827 1708 * Check that the given GVM and VM structures match up. 1828 1709 * … … 1832 1713 * @returns VBox status code. 1833 1714 * @param pGVM The global (ring-0) VM structure. 1834 * @param pVM The cross context VM structure.1835 1715 * @param idCpu The (alleged) Virtual CPU ID of the calling EMT. 1836 1716 * @param ppGVMM Where to store the pointer to the GVMM instance data. … … 1839 1719 * @remarks This will assert in all failure paths. 1840 1720 */ 1841 static int gvmmR0ByGVMand VMandEMT(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGVMM *ppGVMM)1721 static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM) 1842 1722 { 1843 1723 /* … … 1845 1725 */ 1846 1726 AssertPtrReturn(pGVM, VERR_INVALID_POINTER); 1847 1848 AssertPtrReturn(pVM, VERR_INVALID_POINTER); 1849 AssertReturn(((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0, VERR_INVALID_POINTER); 1850 AssertReturn(pGVM == pVM, VERR_INVALID_VM_HANDLE); 1727 AssertReturn(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0, VERR_INVALID_POINTER); 1851 1728 1852 1729 /* … … 1864 1741 PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM]; 1865 1742 AssertReturn( pHandle->pGVM == pGVM 1866 && pHandle->pVM == pVM1867 1743 && pHandle->ProcId == pidSelf 1868 1744 && RT_VALID_PTR(pHandle->pvObj), … … 1879 1755 * Some more VM data consistency checks. 1880 1756 */ 1881 AssertReturn(p VM->cCpus== pGVM->cCpus, VERR_INCONSISTENT_VM_HANDLE);1882 AssertReturn(p VM->hSelf== hGVM, VERR_INCONSISTENT_VM_HANDLE);1883 AssertReturn( p VM->enmVMState >= VMSTATE_CREATING1884 && p VM->enmVMState <= VMSTATE_TERMINATED, VERR_INCONSISTENT_VM_HANDLE);1757 AssertReturn(pGVM->cCpusUnsafe == pGVM->cCpus, VERR_INCONSISTENT_VM_HANDLE); 1758 AssertReturn(pGVM->hSelfUnsafe == hGVM, VERR_INCONSISTENT_VM_HANDLE); 1759 AssertReturn( pGVM->enmVMState >= VMSTATE_CREATING 1760 && pGVM->enmVMState <= VMSTATE_TERMINATED, VERR_INCONSISTENT_VM_HANDLE); 1885 1761 1886 1762 *ppGVMM = pGVMM; … … 1890 1766 1891 1767 /** 1892 * Validates a GVM/ VMpair.1768 * Validates a GVM/EMT pair. 1893 1769 * 1894 1770 * @returns VBox status code. 1895 1771 * @param pGVM The global (ring-0) VM structure. 
1896 * @param pVM The cross context VM structure.1897 */1898 GVMMR0DECL(int) GVMMR0ValidateGVMandVM(PGVM pGVM, PVMCC pVM)1899 {1900 PGVMM pGVMM;1901 return gvmmR0ByGVMandVM(pGVM, pVM, &pGVMM, false /*fTakeUsedLock*/);1902 }1903 1904 1905 1906 /**1907 * Validates a GVM/VM/EMT combo.1908 *1909 * @returns VBox status code.1910 * @param pGVM The global (ring-0) VM structure.1911 * @param pVM The cross context VM structure.1912 1772 * @param idCpu The Virtual CPU ID of the calling EMT. 1913 1773 * @thread EMT(idCpu) 1914 1774 */ 1915 GVMMR0DECL(int) GVMMR0ValidateGVMand VMandEMT(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)1775 GVMMR0DECL(int) GVMMR0ValidateGVMandEMT(PGVM pGVM, VMCPUID idCpu) 1916 1776 { 1917 1777 PGVMM pGVMM; 1918 return gvmmR0ByGVMand VMandEMT(pGVM, pVM, idCpu, &pGVMM);1778 return gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM); 1919 1779 } 1920 1780 … … 1938 1798 */ 1939 1799 PGVMM pGVMM = g_pGVMM; 1940 if ( ! VALID_PTR(pGVMM)1800 if ( !RT_VALID_PTR(pGVMM) 1941 1801 || pGVMM->u32Magic != GVMM_MAGIC) 1942 1802 return NULL; … … 1954 1814 if ( pGVMM->aHandles[i].iSelf == i 1955 1815 && pGVMM->aHandles[i].ProcId == ProcId 1956 && VALID_PTR(pGVMM->aHandles[i].pvObj) 1957 && VALID_PTR(pGVMM->aHandles[i].pVM) 1958 && VALID_PTR(pGVMM->aHandles[i].pGVM)) 1816 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj) 1817 && RT_VALID_PTR(pGVMM->aHandles[i].pGVM)) 1959 1818 { 1960 1819 if (pGVMM->aHandles[i].hEMT0 == hEMT) 1961 return pGVMM->aHandles[i].p VM;1820 return pGVMM->aHandles[i].pGVM; 1962 1821 1963 1822 /* This is fearly safe with the current process per VM approach. */ … … 1970 1829 for (VMCPUID idCpu = 1; idCpu < cCpus; idCpu++) 1971 1830 if (pGVM->aCpus[idCpu].hEMT == hEMT) 1972 return pGVMM->aHandles[i].p VM;1831 return pGVMM->aHandles[i].pGVM; 1973 1832 } 1974 1833 } … … 1995 1854 */ 1996 1855 PGVMM pGVMM = g_pGVMM; 1997 if ( ! VALID_PTR(pGVMM)1856 if ( !RT_VALID_PTR(pGVMM) 1998 1857 || pGVMM->u32Magic != GVMM_MAGIC) 1999 1858 return NULL; … … 2011 1870 if ( pGVMM->aHandles[i].iSelf == i 2012 1871 && pGVMM->aHandles[i].ProcId == ProcId 2013 && VALID_PTR(pGVMM->aHandles[i].pvObj) 2014 && VALID_PTR(pGVMM->aHandles[i].pVM) 2015 && VALID_PTR(pGVMM->aHandles[i].pGVM)) 1872 && RT_VALID_PTR(pGVMM->aHandles[i].pvObj) 1873 && RT_VALID_PTR(pGVMM->aHandles[i].pGVM)) 2016 1874 { 2017 1875 PGVM pGVM = pGVMM->aHandles[i].pGVM; … … 2082 1940 { 2083 1941 PGVM pCurGVM = pGVMM->aHandles[i].pGVM; 2084 if ( VALID_PTR(pCurGVM)1942 if ( RT_VALID_PTR(pCurGVM) 2085 1943 && pCurGVM->u32Magic == GVM_MAGIC) 2086 1944 { … … 2123 1981 { 2124 1982 PGVM pCurGVM = pGVMM->aHandles[i].pGVM; 2125 if ( VALID_PTR(pCurGVM)1983 if ( RT_VALID_PTR(pCurGVM) 2126 1984 && pCurGVM->u32Magic == GVM_MAGIC) 2127 1985 { … … 2153 2011 { 2154 2012 PGVM pCurGVM = pGVMM->aHandles[i].pGVM; 2155 if ( VALID_PTR(pCurGVM)2013 if ( RT_VALID_PTR(pCurGVM) 2156 2014 && pCurGVM->u32Magic == GVM_MAGIC) 2157 2015 { … … 2192 2050 * VERR_INTERRUPTED if a signal was scheduled for the thread. 2193 2051 * @param pGVM The global (ring-0) VM structure. 2194 * @param pVM The cross context VM structure.2195 2052 * @param pGVCpu The global (ring-0) CPU structure of the calling 2196 2053 * EMT. … … 2198 2055 * @thread EMT(pGVCpu). 
2199 2056 */ 2200 GVMMR0DECL(int) GVMMR0SchedHalt(PGVM pGVM, P VMCC pVM, PGVMCPU pGVCpu, uint64_t u64ExpireGipTime)2201 { 2202 LogFlow(("GVMMR0SchedHalt: pGVM=%p p VM=%p pGVCpu=%p(%d) u64ExpireGipTime=%#RX64\n",2203 pGVM, p VM, pGVCpu, pGVCpu->idCpu, u64ExpireGipTime));2057 GVMMR0DECL(int) GVMMR0SchedHalt(PGVM pGVM, PGVMCPU pGVCpu, uint64_t u64ExpireGipTime) 2058 { 2059 LogFlow(("GVMMR0SchedHalt: pGVM=%p pGVCpu=%p(%d) u64ExpireGipTime=%#RX64\n", 2060 pGVM, pGVCpu, pGVCpu->idCpu, u64ExpireGipTime)); 2204 2061 GVMM_CHECK_SMAP_SETUP(); 2205 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2062 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2206 2063 2207 2064 PGVMM pGVMM; … … 2220 2077 { 2221 2078 int rc2 = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc2); 2222 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2079 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2223 2080 } 2224 2081 … … 2231 2088 const uint64_t u64NowSys = RTTimeSystemNanoTS(); 2232 2089 const uint64_t u64NowGip = RTTimeNanoTS(); 2233 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2090 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2234 2091 2235 2092 if (fDoEarlyWakeUps) 2236 2093 { 2237 2094 pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip); 2238 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2095 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2239 2096 } 2240 2097 … … 2261 2118 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2262 2119 } 2263 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2120 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2264 2121 2265 2122 rc = RTSemEventMultiWaitEx(pGVCpu->gvmm.s.HaltEventMulti, 2266 2123 RTSEMWAIT_FLAGS_ABSOLUTE | RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_INTERRUPTIBLE, 2267 2124 u64NowGip > u64NowSys ? u64ExpireGipTime : u64NowSys + cNsInterval); 2268 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2125 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2269 2126 2270 2127 ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0); … … 2275 2132 { 2276 2133 RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti); 2277 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2134 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2278 2135 } 2279 2136 else if (rc == VERR_TIMEOUT) … … 2288 2145 if (fDoEarlyWakeUps) 2289 2146 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2290 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2147 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2291 2148 RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti); 2292 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2149 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2293 2150 rc = VINF_SUCCESS; 2294 2151 } … … 2304 2161 * VERR_INTERRUPTED if a signal was scheduled for the thread. 2305 2162 * @param pGVM The global (ring-0) VM structure. 2306 * @param pVM The cross context VM structure.2307 2163 * @param idCpu The Virtual CPU ID of the calling EMT. 2308 2164 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time. 2309 2165 * @thread EMT(idCpu). 
2310 2166 */ 2311 GVMMR0DECL(int) GVMMR0SchedHaltReq(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, uint64_t u64ExpireGipTime)2167 GVMMR0DECL(int) GVMMR0SchedHaltReq(PGVM pGVM, VMCPUID idCpu, uint64_t u64ExpireGipTime) 2312 2168 { 2313 2169 GVMM_CHECK_SMAP_SETUP(); 2314 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2170 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2315 2171 PGVMM pGVMM; 2316 int rc = gvmmR0ByGVMand VMandEMT(pGVM, pVM, idCpu, &pGVMM);2172 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM); 2317 2173 if (RT_SUCCESS(rc)) 2318 2174 { 2319 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2320 rc = GVMMR0SchedHalt(pGVM, pVM,&pGVM->aCpus[idCpu], u64ExpireGipTime);2321 } 2322 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2175 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2176 rc = GVMMR0SchedHalt(pGVM, &pGVM->aCpus[idCpu], u64ExpireGipTime); 2177 } 2178 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2323 2179 return rc; 2324 2180 } … … 2376 2232 * 2377 2233 * @param pGVM The global (ring-0) VM structure. 2378 * @param pVM The cross context VM structure.2379 2234 * @param idCpu The Virtual CPU ID of the EMT to wake up. 2380 2235 * @param fTakeUsedLock Take the used lock or not 2381 2236 * @thread Any but EMT(idCpu). 2382 2237 */ 2383 GVMMR0DECL(int) GVMMR0SchedWakeUpEx(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, bool fTakeUsedLock)2238 GVMMR0DECL(int) GVMMR0SchedWakeUpEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock) 2384 2239 { 2385 2240 GVMM_CHECK_SMAP_SETUP(); 2386 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2241 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2387 2242 2388 2243 /* … … 2390 2245 */ 2391 2246 PGVMM pGVMM; 2392 int rc = gvmmR0ByGVM andVM(pGVM, pVM, &pGVMM, fTakeUsedLock);2393 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2247 int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock); 2248 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2394 2249 if (RT_SUCCESS(rc)) 2395 2250 { … … 2400 2255 */ 2401 2256 rc = gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]); 2402 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2257 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2403 2258 2404 2259 if (fTakeUsedLock && pGVMM->fDoEarlyWakeUps) … … 2410 2265 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */ 2411 2266 pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now); 2412 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2267 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2413 2268 } 2414 2269 } … … 2420 2275 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2421 2276 AssertRC(rc2); 2422 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2277 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2423 2278 } 2424 2279 } … … 2437 2292 * 2438 2293 * @param pGVM The global (ring-0) VM structure. 2439 * @param pVM The cross context VM structure.2440 2294 * @param idCpu The Virtual CPU ID of the EMT to wake up. 2441 2295 * @thread Any but EMT(idCpu). 2442 2296 */ 2443 GVMMR0DECL(int) GVMMR0SchedWakeUp(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)2444 { 2445 return GVMMR0SchedWakeUpEx(pGVM, pVM,idCpu, true /* fTakeUsedLock */);2297 GVMMR0DECL(int) GVMMR0SchedWakeUp(PGVM pGVM, VMCPUID idCpu) 2298 { 2299 return GVMMR0SchedWakeUpEx(pGVM, idCpu, true /* fTakeUsedLock */); 2446 2300 } 2447 2301 … … 2455 2309 * @retval VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked. 2456 2310 * 2457 * @param p VM The cross contextVM structure.2311 * @param pGVM The global (ring-0) VM structure. 2458 2312 * @param idCpu The Virtual CPU ID of the EMT to wake up. 2459 2313 * @thread Any but EMT(idCpu). 2460 2314 * @deprecated Don't use in new code if possible! Use the GVM variant. 
2461 2315 */ 2462 GVMMR0DECL(int) GVMMR0SchedWakeUpNoGVMNoLock(P VMCC pVM, VMCPUID idCpu)2316 GVMMR0DECL(int) GVMMR0SchedWakeUpNoGVMNoLock(PGVM pGVM, VMCPUID idCpu) 2463 2317 { 2464 2318 GVMM_CHECK_SMAP_SETUP(); 2465 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 2466 PGVM pGVM; 2319 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2467 2320 PGVMM pGVMM; 2468 int rc = gvmmR0By VM(pVM, &pGVM, &pGVMM, false /*fTakeUsedLock*/);2469 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2321 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/); 2322 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2470 2323 if (RT_SUCCESS(rc)) 2471 rc = GVMMR0SchedWakeUpEx(pGVM, pVM,idCpu, false /*fTakeUsedLock*/);2324 rc = GVMMR0SchedWakeUpEx(pGVM, idCpu, false /*fTakeUsedLock*/); 2472 2325 return rc; 2473 2326 } … … 2511 2364 * 2512 2365 * @param pGVM The global (ring-0) VM structure. 2513 * @param pVM The cross context VM structure.2514 2366 * @param idCpu The ID of the virtual CPU to poke. 2515 2367 * @param fTakeUsedLock Take the used lock or not 2516 2368 */ 2517 GVMMR0DECL(int) GVMMR0SchedPokeEx(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, bool fTakeUsedLock)2369 GVMMR0DECL(int) GVMMR0SchedPokeEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock) 2518 2370 { 2519 2371 /* … … 2521 2373 */ 2522 2374 PGVMM pGVMM; 2523 int rc = gvmmR0ByGVM andVM(pGVM, pVM, &pGVMM, fTakeUsedLock);2375 int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock); 2524 2376 if (RT_SUCCESS(rc)) 2525 2377 { … … 2549 2401 * 2550 2402 * @param pGVM The global (ring-0) VM structure. 2551 * @param pVM The cross context VM structure.2552 2403 * @param idCpu The ID of the virtual CPU to poke. 2553 2404 */ 2554 GVMMR0DECL(int) GVMMR0SchedPoke(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)2555 { 2556 return GVMMR0SchedPokeEx(pGVM, pVM,idCpu, true /* fTakeUsedLock */);2405 GVMMR0DECL(int) GVMMR0SchedPoke(PGVM pGVM, VMCPUID idCpu) 2406 { 2407 return GVMMR0SchedPokeEx(pGVM, idCpu, true /* fTakeUsedLock */); 2557 2408 } 2558 2409 … … 2566 2417 * @retval VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC. 2567 2418 * 2568 * @param p VM The cross contextVM structure.2419 * @param pGVM The global (ring-0) VM structure. 2569 2420 * @param idCpu The ID of the virtual CPU to poke. 2570 2421 * 2571 2422 * @deprecated Don't use in new code if possible! Use the GVM variant. 2572 2423 */ 2573 GVMMR0DECL(int) GVMMR0SchedPokeNoGVMNoLock(PVMCC pVM, VMCPUID idCpu) 2574 { 2575 PGVM pGVM; 2424 GVMMR0DECL(int) GVMMR0SchedPokeNoGVMNoLock(PGVM pGVM, VMCPUID idCpu) 2425 { 2576 2426 PGVMM pGVMM; 2577 int rc = gvmmR0By VM(pVM, &pGVM, &pGVMM, false /*fTakeUsedLock*/);2427 int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/); 2578 2428 if (RT_SUCCESS(rc)) 2579 2429 { … … 2593 2443 * 2594 2444 * @param pGVM The global (ring-0) VM structure. 2595 * @param pVM The cross context VM structure.2596 2445 * @param pSleepSet The set of sleepers to wake up. 2597 2446 * @param pPokeSet The set of CPUs to poke. 
2598 2447 */ 2599 GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpus(PGVM pGVM, P VMCC pVM, PCVMCPUSET pSleepSet, PCVMCPUSET pPokeSet)2448 GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpus(PGVM pGVM, PCVMCPUSET pSleepSet, PCVMCPUSET pPokeSet) 2600 2449 { 2601 2450 AssertPtrReturn(pSleepSet, VERR_INVALID_POINTER); 2602 2451 AssertPtrReturn(pPokeSet, VERR_INVALID_POINTER); 2603 2452 GVMM_CHECK_SMAP_SETUP(); 2604 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2453 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2605 2454 RTNATIVETHREAD hSelf = RTThreadNativeSelf(); 2606 2455 … … 2609 2458 */ 2610 2459 PGVMM pGVMM; 2611 int rc = gvmmR0ByGVM andVM(pGVM, pVM, &pGVMM, true /* fTakeUsedLock */);2612 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2460 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /* fTakeUsedLock */); 2461 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2613 2462 if (RT_SUCCESS(rc)) 2614 2463 { … … 2625 2474 { 2626 2475 gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]); 2627 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2476 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2628 2477 } 2629 2478 else if (VMCPUSET_IS_PRESENT(pPokeSet, idCpu)) 2630 2479 { 2631 2480 gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]); 2632 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2481 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2633 2482 } 2634 2483 } … … 2636 2485 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2637 2486 AssertRC(rc2); 2638 GVMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2487 GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2639 2488 } 2640 2489 … … 2649 2498 * @returns see GVMMR0SchedWakeUpAndPokeCpus. 2650 2499 * @param pGVM The global (ring-0) VM structure. 2651 * @param pVM The cross context VM structure.2652 2500 * @param pReq Pointer to the request packet. 2653 2501 */ 2654 GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpusReq(PGVM pGVM, P VMCC pVM, PGVMMSCHEDWAKEUPANDPOKECPUSREQ pReq)2502 GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpusReq(PGVM pGVM, PGVMMSCHEDWAKEUPANDPOKECPUSREQ pReq) 2655 2503 { 2656 2504 /* … … 2660 2508 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER); 2661 2509 2662 return GVMMR0SchedWakeUpAndPokeCpus(pGVM, pVM,&pReq->SleepSet, &pReq->PokeSet);2510 return GVMMR0SchedWakeUpAndPokeCpus(pGVM, &pReq->SleepSet, &pReq->PokeSet); 2663 2511 } 2664 2512 … … 2674 2522 * VINF_GVM_YIELDED if an attempt to switch to a different VM task was made. 2675 2523 * @param pGVM The global (ring-0) VM structure. 2676 * @param pVM The cross context VM structure.2677 2524 * @param idCpu The Virtual CPU ID of the calling EMT. 2678 2525 * @param fYield Whether to yield or not. … … 2680 2527 * @thread EMT(idCpu). 2681 2528 */ 2682 GVMMR0DECL(int) GVMMR0SchedPoll(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, bool fYield)2529 GVMMR0DECL(int) GVMMR0SchedPoll(PGVM pGVM, VMCPUID idCpu, bool fYield) 2683 2530 { 2684 2531 /* … … 2686 2533 */ 2687 2534 PGVMM pGVMM; 2688 int rc = gvmmR0ByGVMand VMandEMT(pGVM, pVM, idCpu, &pGVMM);2535 int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM); 2689 2536 if (RT_SUCCESS(rc)) 2690 2537 { … … 2809 2656 * The caller must check that the host can do high resolution timers. 2810 2657 * 2811 * @param p VM The cross contextVM structure.2658 * @param pGVM The global (ring-0) VM structure. 2812 2659 * @param idHostCpu The current host CPU id. 2813 2660 * @param uHz The desired frequency. 
2814 2661 */ 2815 GVMMR0DECL(void) GVMMR0SchedUpdatePeriodicPreemptionTimer(P VMCC pVM, RTCPUID idHostCpu, uint32_t uHz)2816 { 2817 NOREF(p VM);2662 GVMMR0DECL(void) GVMMR0SchedUpdatePeriodicPreemptionTimer(PGVM pGVM, RTCPUID idHostCpu, uint32_t uHz) 2663 { 2664 NOREF(pGVM); 2818 2665 #ifdef GVMM_SCHED_WITH_PPT 2819 2666 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 2825 2672 uint32_t iCpu = RTMpCpuIdToSetIndex(idHostCpu); 2826 2673 PGVMM pGVMM = g_pGVMM; 2827 if ( ! VALID_PTR(pGVMM)2674 if ( !RT_VALID_PTR(pGVMM) 2828 2675 || pGVMM->u32Magic != GVMM_MAGIC) 2829 2676 return; … … 2894 2741 * @param pSession The current session. 2895 2742 * @param pGVM The GVM to obtain statistics for. Optional. 2896 * @param pVM The VM structure corresponding to @a pGVM. 2897 */ 2898 GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM, PVMCC pVM) 2899 { 2900 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p pVM=%p\n", pStats, pSession, pGVM, pVM)); 2743 */ 2744 GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM) 2745 { 2746 LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM)); 2901 2747 2902 2748 /* … … 2913 2759 if (pGVM) 2914 2760 { 2915 int rc = gvmmR0ByGVM andVM(pGVM, pVM, &pGVMM, true /*fTakeUsedLock*/);2761 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/); 2916 2762 if (RT_FAILURE(rc)) 2917 2763 return rc; … … 2940 2786 PGVM pOtherGVM = pGVMM->aHandles[i].pGVM; 2941 2787 void *pvObj = pGVMM->aHandles[i].pvObj; 2942 if ( VALID_PTR(pvObj)2943 && VALID_PTR(pOtherGVM)2788 if ( RT_VALID_PTR(pvObj) 2789 && RT_VALID_PTR(pOtherGVM) 2944 2790 && pOtherGVM->u32Magic == GVM_MAGIC 2945 2791 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL))) … … 3007 2853 * @returns see GVMMR0QueryStatistics. 3008 2854 * @param pGVM The global (ring-0) VM structure. Optional. 3009 * @param pVM The cross context VM structure. Optional.3010 2855 * @param pReq Pointer to the request packet. 3011 2856 * @param pSession The current session. 3012 2857 */ 3013 GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PGVM pGVM, P VMCC pVM, PGVMMQUERYSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)2858 GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PGVM pGVM, PGVMMQUERYSTATISTICSSREQ pReq, PSUPDRVSESSION pSession) 3014 2859 { 3015 2860 /* … … 3020 2865 AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER); 3021 2866 3022 return GVMMR0QueryStatistics(&pReq->Stats, pSession, pGVM , pVM);2867 return GVMMR0QueryStatistics(&pReq->Stats, pSession, pGVM); 3023 2868 } 3024 2869 … … 3032 2877 * @param pSession The current session. 3033 2878 * @param pGVM The GVM to reset statistics for. Optional. 3034 * @param pVM The VM structure corresponding to @a pGVM. 
3035 */ 3036 GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM, PVMCC pVM) 3037 { 3038 LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pGVM=%p pVM=%p\n", pStats, pSession, pGVM, pVM)); 2879 */ 2880 GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM) 2881 { 2882 LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM)); 3039 2883 3040 2884 /* … … 3050 2894 if (pGVM) 3051 2895 { 3052 int rc = gvmmR0ByGVM andVM(pGVM, pVM, &pGVMM, true /*fTakeUsedLock*/);2896 int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/); 3053 2897 if (RT_FAILURE(rc)) 3054 2898 return rc; … … 3089 2933 PGVM pOtherGVM = pGVMM->aHandles[i].pGVM; 3090 2934 void *pvObj = pGVMM->aHandles[i].pvObj; 3091 if ( VALID_PTR(pvObj)3092 && VALID_PTR(pOtherGVM)2935 if ( RT_VALID_PTR(pvObj) 2936 && RT_VALID_PTR(pOtherGVM) 3093 2937 && pOtherGVM->u32Magic == GVM_MAGIC 3094 2938 && RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL))) … … 3125 2969 * @returns see GVMMR0ResetStatistics. 3126 2970 * @param pGVM The global (ring-0) VM structure. Optional. 3127 * @param pVM The cross context VM structure. Optional.3128 2971 * @param pReq Pointer to the request packet. 3129 2972 * @param pSession The current session. 3130 2973 */ 3131 GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PGVM pGVM, P VMCC pVM, PGVMMRESETSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)2974 GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PGVM pGVM, PGVMMRESETSTATISTICSSREQ pReq, PSUPDRVSESSION pSession) 3132 2975 { 3133 2976 /* … … 3138 2981 AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER); 3139 2982 3140 return GVMMR0ResetStatistics(&pReq->Stats, pSession, pGVM , pVM);3141 } 3142 2983 return GVMMR0ResetStatistics(&pReq->Stats, pSession, pGVM); 2984 } 2985 -
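The core of the GVMMR0.cpp change is the collapse of gvmmR0ByGVMandVM and the removed gvmmR0ByVM into a single gvmmR0ByGVM: lookups now key on pGVM alone, and the "-Unsafe" suffixed fields (apparently the ring-3 facing copies) are cross-checked against their trusted ring-0 counterparts. A condensed restatement of that validation order, paraphrasing gvmmR0ByGVM above with the used-lock handling and the GVMM instance lookup omitted for brevity:

/* Condensed sketch of the checks gvmmR0ByGVM performs; not a drop-in
   replacement (the real code can also take the used lock in shared mode). */
static int gvmmSampleValidateGVM(PGVMM pGVMM, PGVM pGVM)
{
    if (   !RT_VALID_PTR(pGVM)
        || ((uintptr_t)pGVM & PAGE_OFFSET_MASK) != 0)
        return VERR_INVALID_POINTER;

    uint16_t const hGVM = pGVM->hSelf;              /* the handle lives in pGVM itself */
    if (   hGVM == NIL_GVM_HANDLE
        || hGVM >= RT_ELEMENTS(pGVMM->aHandles))
        return VERR_INVALID_VM_HANDLE;

    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    if (   pHandle->pGVM   != pGVM                  /* handle table must agree */
        || pHandle->ProcId != RTProcSelf()          /* same process as the creator */
        || !RT_VALID_PTR(pHandle->pvObj))
        return VERR_INVALID_VM_HANDLE;

    if (   pGVM->cCpusUnsafe != pGVM->cCpus         /* ring-3 visible vs trusted copy */
        || pGVM->hSelfUnsafe != hGVM
        || pGVM->pSelf       != pGVM)
        return VERR_INCONSISTENT_VM_HANDLE;

    if (   pGVM->enmVMState < VMSTATE_CREATING      /* sanity-check the life cycle state */
        || pGVM->enmVMState > VMSTATE_TERMINATED)
        return VERR_INCONSISTENT_VM_HANDLE;

    return VINF_SUCCESS;
}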
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r80334 r80346 80 80 * Internal Functions * 81 81 *********************************************************************************************************************************/ 82 NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, P VMCC pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,82 NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, 83 83 uint32_t cPages, uint32_t fFlags); 84 84 NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages); … … 89 89 NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue); 90 90 #endif 91 DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, P VMCPUCC pVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,91 DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput, 92 92 void *pvOutput, uint32_t cbOutput); 93 93 … … 156 156 * @returns VBox status code. 157 157 * @param pGVM The ring-0 VM handle. 158 * @param pVM The cross context VM handle.159 158 * @thread EMT(0) 160 159 */ 161 VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM , PVMCC pVM)160 VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM) 162 161 { 163 162 AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding)); 164 163 AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding)); 165 164 166 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, 0);165 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0); 167 166 AssertRCReturn(rc, rc); 168 167 … … 228 227 } 229 228 230 RT_NOREF(pVM);231 229 return rc; 232 230 } … … 238 236 * @returns NT status code. 239 237 * @param pGVM The ring-0 VM structure. 240 * @param p VCpu The cross contextCPU structure of the calling EMT.238 * @param pGVCpu The global (ring-0) CPU structure of the calling EMT. 241 239 * @param uFunction The function to perform. 242 240 * @param pvInput The input buffer. This must point within the VM … … 249 247 * @param cbOutput The size of the output. @a pvOutput must be NULL 250 248 * when zero. 251 * @thread EMT(p VCpu)249 * @thread EMT(pGVCpu) 252 250 */ 253 DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, P VMCPUCC pVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,251 DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput, 254 252 void *pvOutput, uint32_t cbOutput) 255 253 { … … 258 256 * Input and output parameters are part of the VM CPU structure. 259 257 */ 260 VMCPU_ASSERT_EMT(p VCpu);258 VMCPU_ASSERT_EMT(pGVCpu); 261 259 if (pvInput) 262 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)p VCpu <= sizeof(*pVCpu), VERR_INVALID_PARAMETER);260 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER); 263 261 if (pvOutput) 264 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)p VCpu <= sizeof(*pVCpu), VERR_INVALID_PARAMETER);262 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER); 265 263 #endif 266 264 … … 268 266 int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction, 269 267 pvInput, 270 pvInput ? (uintptr_t)pvInput + p VCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,268 pvInput ? (uintptr_t)pvInput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR, 271 269 cbInput, 272 270 pvOutput, 273 pvOutput ? 
(uintptr_t)pvOutput + p VCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,271 pvOutput ? (uintptr_t)pvOutput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR, 274 272 cbOutput, 275 273 &rcNt); … … 285 283 * @returns VBox status code. 286 284 * @param pGVM The ring-0 VM handle. 287 * @param pVM The cross context VM handle.288 285 * @thread EMT(0) 289 286 */ 290 VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM , PVMCC pVM)287 VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM) 291 288 { 292 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, 0);289 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0); 293 290 AssertRCReturn(rc, rc); 294 291 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n")); … … 298 295 * Copy and validate the I/O control information from ring-3. 299 296 */ 300 NEMWINIOCTL Copy = p VM->nem.s.IoCtlGetHvPartitionId;297 NEMWINIOCTL Copy = pGVM->nem.s.IoCtlGetHvPartitionId; 301 298 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED); 302 299 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED); … … 304 301 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy; 305 302 306 pGVM->nemr0.s.fMayUseRing0Runloop = p VM->nem.s.fUseRing0Runloop;307 308 Copy = p VM->nem.s.IoCtlStartVirtualProcessor;303 pGVM->nemr0.s.fMayUseRing0Runloop = pGVM->nem.s.fUseRing0Runloop; 304 305 Copy = pGVM->nem.s.IoCtlStartVirtualProcessor; 309 306 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED); 310 307 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED); … … 314 311 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy; 315 312 316 Copy = p VM->nem.s.IoCtlStopVirtualProcessor;313 Copy = pGVM->nem.s.IoCtlStopVirtualProcessor; 317 314 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED); 318 315 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED); … … 323 320 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy; 324 321 325 Copy = p VM->nem.s.IoCtlMessageSlotHandleAndGetNext;322 Copy = pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext; 326 323 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED); 327 324 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT) … … 336 333 337 334 if ( RT_SUCCESS(rc) 338 || !p VM->nem.s.fUseRing0Runloop)335 || !pGVM->nem.s.fUseRing0Runloop) 339 336 { 340 337 /* 341 338 * Setup of an I/O control context for the partition handle for later use. 342 339 */ 343 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, p VM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);340 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pGVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx); 344 341 AssertLogRelRCReturn(rc, rc); 345 342 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++) … … 357 354 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED); 358 355 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition; 359 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == p VM->nem.s.idHvPartition,360 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, p VM->nem.s.idHvPartition),356 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pGVM->nem.s.idHvPartition, 357 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition), 361 358 VERR_NEM_INIT_FAILED); 362 359 } … … 424 421 * Worker for NEMR0MapPages and others. 
425 422 */ 426 NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, P VMCC pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,423 NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, 427 424 uint32_t cPages, uint32_t fFlags) 428 425 { … … 458 455 { 459 456 RTHCPHYS HCPhys = NIL_RTGCPHYS; 460 int rc = PGMPhysGCPhys2HCPhys(p VM, GCPhysSrc, &HCPhys);457 int rc = PGMPhysGCPhys2HCPhys(pGVM, GCPhysSrc, &HCPhys); 461 458 AssertRCReturn(rc, rc); 462 459 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT; … … 501 498 * @returns VBox status code. 502 499 * @param pGVM The ring-0 VM handle. 503 * @param pVM The cross context VM handle.504 500 * @param idCpu The calling EMT. Necessary for getting the 505 501 * hypercall page and arguments. 506 502 * @thread EMT(idCpu) 507 503 */ 508 VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)504 VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, VMCPUID idCpu) 509 505 { 510 506 /* 511 507 * Unpack the call. 512 508 */ 513 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);509 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 514 510 if (RT_SUCCESS(rc)) 515 511 { 516 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);517 512 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 518 513 519 RTGCPHYS const GCPhysSrc = p VCpu->nem.s.Hypercall.MapPages.GCPhysSrc;520 RTGCPHYS const GCPhysDst = p VCpu->nem.s.Hypercall.MapPages.GCPhysDst;521 uint32_t const cPages = p VCpu->nem.s.Hypercall.MapPages.cPages;522 HV_MAP_GPA_FLAGS const fFlags = p VCpu->nem.s.Hypercall.MapPages.fFlags;514 RTGCPHYS const GCPhysSrc = pGVCpu->nem.s.Hypercall.MapPages.GCPhysSrc; 515 RTGCPHYS const GCPhysDst = pGVCpu->nem.s.Hypercall.MapPages.GCPhysDst; 516 uint32_t const cPages = pGVCpu->nem.s.Hypercall.MapPages.cPages; 517 HV_MAP_GPA_FLAGS const fFlags = pGVCpu->nem.s.Hypercall.MapPages.fFlags; 523 518 524 519 /* 525 520 * Do the work. 526 521 */ 527 rc = nemR0WinMapPages(pGVM, p VM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);522 rc = nemR0WinMapPages(pGVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags); 528 523 } 529 524 return rc; … … 581 576 * @returns VBox status code. 582 577 * @param pGVM The ring-0 VM handle. 583 * @param pVM The cross context VM handle.584 578 * @param idCpu The calling EMT. Necessary for getting the 585 579 * hypercall page and arguments. 586 580 * @thread EMT(idCpu) 587 581 */ 588 VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)582 VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, VMCPUID idCpu) 589 583 { 590 584 /* 591 585 * Unpack the call. 
592 586 */ 593 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);587 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 594 588 if (RT_SUCCESS(rc)) 595 589 { 596 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);597 590 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 598 591 599 RTGCPHYS const GCPhys = p VCpu->nem.s.Hypercall.UnmapPages.GCPhys;600 uint32_t const cPages = p VCpu->nem.s.Hypercall.UnmapPages.cPages;592 RTGCPHYS const GCPhys = pGVCpu->nem.s.Hypercall.UnmapPages.GCPhys; 593 uint32_t const cPages = pGVCpu->nem.s.Hypercall.UnmapPages.cPages; 601 594 602 595 /* … … 622 615 NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx) 623 616 { 624 PVMCPUCC pVCpu = pGVCpu;625 617 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage; 626 618 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3); … … 633 625 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK); 634 626 if ( !fWhat 635 && p VCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)627 && pGVCpu->nem.s.fCurrentInterruptWindows == pGVCpu->nem.s.fDesiredInterruptWindows) 636 628 return VINF_SUCCESS; 637 629 uintptr_t iReg = 0; … … 867 859 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 868 860 pInput->Elements[iReg].Name = HvX64RegisterCr8; 869 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(p VCpu);861 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pGVCpu); 870 862 iReg++; 871 863 } … … 879 871 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 880 872 pInput->Elements[iReg].Name = HvX64RegisterDr0; 881 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(p VCpu);873 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pGVCpu); 882 874 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0]; 883 875 iReg++; 884 876 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 885 877 pInput->Elements[iReg].Name = HvX64RegisterDr1; 886 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(p VCpu);878 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pGVCpu); 887 879 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1]; 888 880 iReg++; 889 881 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 890 882 pInput->Elements[iReg].Name = HvX64RegisterDr2; 891 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(p VCpu);883 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pGVCpu); 892 884 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2]; 893 885 iReg++; 894 886 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 895 887 pInput->Elements[iReg].Name = HvX64RegisterDr3; 896 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(p VCpu);888 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pGVCpu); 897 889 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3]; 898 890 iReg++; … … 902 894 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 903 895 pInput->Elements[iReg].Name = HvX64RegisterDr6; 904 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(p VCpu);896 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pGVCpu); 905 897 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6]; 906 898 iReg++; … … 910 902 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 911 903 pInput->Elements[iReg].Name = HvX64RegisterDr7; 912 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(p VCpu);904 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pGVCpu); 913 905 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7]; 914 906 iReg++; … 
…
    HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    pInput->Elements[iReg].Name = HvX64RegisterApicBase;
-    pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
+    pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pGVCpu);
    iReg++;
    HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
…
    HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
-    pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
+    pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pGVCpu);
    iReg++;
# endif

-    PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
+    PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);

    HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
…

# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
-    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
+    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
    if (enmCpuVendor != CPUMCPUVENDOR_AMD)
    {
…
        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
        pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
-        pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
+        pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pGVCpu);
        iReg++;
    }
…
        pInput->Elements[iReg].Name = HvRegisterInterruptState;
        pInput->Elements[iReg].Value.Reg64 = 0;
-        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
-            && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
+        if (   VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+            && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
            pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
-        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+        if (VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
            pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
        iReg++;
…
    else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
    {
-        if (   pVCpu->nem.s.fLastInterruptShadow
-            || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
-                && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
+        if (   pGVCpu->nem.s.fLastInterruptShadow
+            || (   VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+                && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip))
        {
            HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
            pInput->Elements[iReg].Name = HvRegisterInterruptState;
            pInput->Elements[iReg].Value.Reg64 = 0;
-            if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
-                && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
+            if (   VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+                && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
                pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
-            //if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+            //if (VMCPU_FF_IS_ANY_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
            //    pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
            iReg++;
…

    /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful.
 */
-    uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
+    uint8_t const fDesiredIntWin = pGVCpu->nem.s.fDesiredInterruptWindows;
    if (   fDesiredIntWin
-        || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
+        || pGVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    {
-        pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
+        pGVCpu->nem.s.fCurrentInterruptWindows = pGVCpu->nem.s.fDesiredInterruptWindows;
        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
        pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
…
 * @returns VBox status code
 * @param   pGVM    The ring-0 VM handle.
- * @param   pVM     The cross context VM handle.
 * @param   idCpu   The calling EMT.  Necessary for getting the
 *                  hypercall page and arguments.
 */
-VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
+VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, VMCPUID idCpu)
{
#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
    /*
     * Validate the call.
     */
-    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    if (RT_SUCCESS(rc))
    {
-        PVMCPUCC pVCpu  = VMCC_GET_CPU(pVM, idCpu);
        PGVMCPU  pGVCpu = &pGVM->aCpus[idCpu];
        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);

        /*
         * Call worker.
         */
-        rc = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
+        rc = nemR0WinExportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx);
    }
    return rc;
#else
-    RT_NOREF(pGVM, pVM, idCpu);
+    RT_NOREF(pGVM, idCpu);
    return VERR_NOT_IMPLEMENTED;
#endif
…
     * Copy information to the CPUM context.
     */
-    PVMCPUCC pVCpu = pGVCpu;
    iReg = 0;
…
        if (pCtx->cr0 != paValues[iReg].Reg64)
        {
-            CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
+            CPUMSetGuestCR0(pGVCpu, paValues[iReg].Reg64);
            fMaybeChangedMode = true;
        }
…
        if (pCtx->cr3 != paValues[iReg].Reg64)
        {
-            CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
+            CPUMSetGuestCR3(pGVCpu, paValues[iReg].Reg64);
            fUpdateCr3 = true;
        }
…
        if (pCtx->cr4 != paValues[iReg].Reg64)
        {
-            CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
+            CPUMSetGuestCR4(pGVCpu, paValues[iReg].Reg64);
            fMaybeChangedMode = true;
        }
…
    {
        Assert(pInput->Names[iReg] == HvX64RegisterCr8);
-        APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
+        APICSetTpr(pGVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
        iReg++;
    }
…
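[Editor's note: a hedged recap of the CPUMCTX_EXTRN_* convention the import code above relies on; CR3 is used purely as the example field.]

    /* A set fExtrn bit means the field still lives in Hyper-V and must be fetched before use: */
    if (pCtx->fExtrn & CPUMCTX_EXTRN_CR3)
    {
        /* ... read HvX64RegisterCr3 into pCtx->cr3, as in the hunks above ... */
        pCtx->fExtrn &= ~CPUMCTX_EXTRN_CR3;   /* clearing the bit marks the field current */
    }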
        Assert(pInput->Names[iReg] == HvX64RegisterDr7);
        if (pCtx->dr[7] != paValues[iReg].Reg64)
-            CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
+            CPUMSetGuestDR7(pGVCpu, paValues[iReg].Reg64);
        pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
        iReg++;
…
        Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
        if (pCtx->dr[0] != paValues[iReg].Reg64)
-            CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
+            CPUMSetGuestDR0(pGVCpu, paValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[1] != paValues[iReg].Reg64)
-            CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
+            CPUMSetGuestDR1(pGVCpu, paValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[2] != paValues[iReg].Reg64)
-            CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
+            CPUMSetGuestDR2(pGVCpu, paValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[3] != paValues[iReg].Reg64)
-            CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
+            CPUMSetGuestDR3(pGVCpu, paValues[iReg].Reg64);
        iReg++;
    }
…
        Assert(pInput->Names[iReg] == HvX64RegisterDr6);
        if (pCtx->dr[6] != paValues[iReg].Reg64)
-            CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
+            CPUMSetGuestDR6(pGVCpu, paValues[iReg].Reg64);
        iReg++;
    }
…
        if (paValues[iReg].Reg64 != pCtx->msrEFER)
        {
-            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
            if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
-                PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
+                PGMNotifyNxeChanged(pGVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
            pCtx->msrEFER = paValues[iReg].Reg64;
            fMaybeChangedMode = true;
…
        Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
        if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
-            Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
        pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
        if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
-            Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
        pCtx->SysEnter.cs = paValues[iReg].Reg64;
        iReg++;

        Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
        if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
-            Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
        pCtx->SysEnter.eip = paValues[iReg].Reg64;
        iReg++;

        Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
        if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
-            Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
        pCtx->SysEnter.esp = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterStar);
        if (pCtx->msrSTAR != paValues[iReg].Reg64)
-            Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
        pCtx->msrSTAR = paValues[iReg].Reg64;
        iReg++;

        Assert(pInput->Names[iReg] == HvX64RegisterLstar);
        if (pCtx->msrLSTAR != paValues[iReg].Reg64)
-            Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
        pCtx->msrLSTAR = paValues[iReg].Reg64;
        iReg++;

        Assert(pInput->Names[iReg] == HvX64RegisterCstar);
        if (pCtx->msrCSTAR != paValues[iReg].Reg64)
-            Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
        pCtx->msrCSTAR = paValues[iReg].Reg64;
        iReg++;

        Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
        if (pCtx->msrSFMASK != paValues[iReg].Reg64)
-            Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
        pCtx->msrSFMASK = paValues[iReg].Reg64;
        iReg++;
…
    {
        Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
-        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
+        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pGVCpu);
        if (paValues[iReg].Reg64 != uOldBase)
        {
            Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
-                  pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
-            int rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
+                  pGVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
+            int rc2 = APICSetBaseMsr(pGVCpu, paValues[iReg].Reg64);
            AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
        }
…
        Assert(pInput->Names[iReg] == HvX64RegisterPat);
        if (pCtx->msrPAT != paValues[iReg].Reg64)
-            Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
        pCtx->msrPAT = paValues[iReg].Reg64;
        iReg++;

# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap?
(AMD) */
        Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
-        if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
-            Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
+        if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pGVCpu))
+            Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pGVCpu), paValues[iReg].Reg64));
        iReg++;
# endif

-        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
+        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
        Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
-            Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_00000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
        iReg++;

        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
        iReg++;

        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
        iReg++;

        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_C0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
        iReg++;

        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000,
+            Log7(("NEM/%u: MSR MTRR_FIX16K_C8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000,
+                  paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_D0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_D8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_E0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_E8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_F0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
-            Log7(("NEM/%u: MSR MTRR_FIX16K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MTRR_FIX16K_F8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
        pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
-            Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
        pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
-            Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
        pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
        iReg++;
…
        Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
        if (paValues[iReg].Reg64 != pCtx->hwvirt.vmx.Msrs.u64FeatCtrl)
-            Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, pCtx->hwvirt.vmx.Msrs.u64FeatCtrl, paValues[iReg].Reg64));
+            Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, pCtx->hwvirt.vmx.Msrs.u64FeatCtrl, paValues[iReg].Reg64));
        iReg++;
    }
…
    if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
    {
-        pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
+        pGVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
        if (paValues[iReg].InterruptState.InterruptShadow)
-            EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
+            EMSetInhibitInterruptsPC(pGVCpu, paValues[iReg + 1].Reg64);
        else
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+            VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    }

…
    {
        if (paValues[iReg].InterruptState.NmiMasked)
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
+            VMCPU_FF_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS);
        else
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+            VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_BLOCK_NMIS);
    }

…
    if (fMaybeChangedMode)
    {
-        rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
+        rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
    }
…
    {
        LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
-        rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
+        rc = PGMUpdateCR3(pGVCpu, pCtx->cr3);
        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
    }
…
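[Editor's note: a hedged condensation of the deferred-update idiom in the import path above; PGM is notified once after the register copy loop rather than per register.]

    bool fMaybeChangedMode = false, fUpdateCr3 = false;
    /* ... the copy loop only sets the flags (see the CR0/CR3/CR4/EFER hunks above) ... */
    if (fMaybeChangedMode)
        rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);  /* re-derive paging mode once */
    if (fUpdateCr3)
        rc = PGMUpdateCR3(pGVCpu, pCtx->cr3);                             /* refresh the shadow CR3 once */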
2237 2224 */ 2238 rc = nemR0WinImportState(pGVM, pGVCpu, &p VCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);2225 rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/); 2239 2226 } 2240 2227 return rc; 2241 2228 #else 2242 RT_NOREF(pGVM, pVM,idCpu, fWhat);2229 RT_NOREF(pGVM, idCpu, fWhat); 2243 2230 return VERR_NOT_IMPLEMENTED; 2244 2231 #endif … … 2300 2287 * @returns VBox status code 2301 2288 * @param pGVM The ring-0 VM handle. 2302 * @param pVM The cross context VM handle.2303 2289 * @param idCpu The calling EMT. Necessary for getting the 2304 2290 * hypercall page and arguments. 2305 2291 */ 2306 VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)2292 VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, VMCPUID idCpu) 2307 2293 { 2308 2294 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) … … 2310 2296 * Validate the call. 2311 2297 */ 2312 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);2298 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 2313 2299 if (RT_SUCCESS(rc)) 2314 2300 { 2315 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);2316 2301 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 2317 2302 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API); … … 2320 2305 * Call worker. 2321 2306 */ 2322 p VCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;2323 p VCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;2324 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &p VCpu->nem.s.Hypercall.QueryCpuTick.cTicks,2325 &p VCpu->nem.s.Hypercall.QueryCpuTick.uAux);2307 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0; 2308 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0; 2309 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks, 2310 &pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux); 2326 2311 } 2327 2312 return rc; 2328 2313 #else 2329 RT_NOREF(pGVM, pVM,idCpu);2314 RT_NOREF(pGVM, idCpu); 2330 2315 return VERR_NOT_IMPLEMENTED; 2331 2316 #endif … … 2409 2394 * @returns VBox status code 2410 2395 * @param pGVM The ring-0 VM handle. 2411 * @param pVM The cross context VM handle.2412 2396 * @param idCpu The calling EMT. Necessary for getting the 2413 2397 * hypercall page and arguments. 2414 2398 * @param uPausedTscValue The TSC value at the time of pausing. 2415 2399 */ 2416 VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, uint64_t uPausedTscValue)2400 VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, VMCPUID idCpu, uint64_t uPausedTscValue) 2417 2401 { 2418 2402 #if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) … … 2420 2404 * Validate the call. 2421 2405 */ 2422 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);2406 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 2423 2407 if (RT_SUCCESS(rc)) 2424 2408 { 2425 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);2426 2409 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 2427 2410 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API); … … 2430 2413 * Call worker. 
2431 2414 */ 2432 p VCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;2433 p VCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;2415 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0; 2416 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0; 2434 2417 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue); 2435 2418 } 2436 2419 return rc; 2437 2420 #else 2438 RT_NOREF(pGVM, pVM,idCpu, uPausedTscValue);2421 RT_NOREF(pGVM, idCpu, uPausedTscValue); 2439 2422 return VERR_NOT_IMPLEMENTED; 2440 2423 #endif … … 2446 2429 #ifdef NEM_WIN_WITH_RING0_RUNLOOP 2447 2430 if (pGVM->nemr0.s.fMayUseRing0Runloop) 2448 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu] , pGVM, &pGVM->aCpus[idCpu]);2431 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu]); 2449 2432 return VERR_NEM_RING3_ONLY; 2450 2433 #else … … 2460 2443 * @returns VBox status code. 2461 2444 * @param pGVM The ring-0 VM handle. 2462 * @param pVM The cross context VM handle.2463 2445 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall 2464 2446 * page and arguments. 2465 2447 */ 2466 VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)2448 VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, VMCPUID idCpu) 2467 2449 { 2468 2450 /* … … 2471 2453 int rc; 2472 2454 if (idCpu == NIL_VMCPUID) 2473 rc = GVMMR0ValidateGVM andVM(pGVM, pVM);2455 rc = GVMMR0ValidateGVM(pGVM); 2474 2456 else 2475 rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);2457 rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 2476 2458 if (RT_SUCCESS(rc)) 2477 2459 { … … 2506 2488 if (uResult == HV_STATUS_SUCCESS) 2507 2489 { 2508 p VM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;2509 p VM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;2490 pGVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable; 2491 pGVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse; 2510 2492 rc = VINF_SUCCESS; 2511 2493 } … … 2533 2515 * 2534 2516 * @param pGVM The ring-0 VM handle. 2535 * @param pVM The cross context VM handle.2536 2517 * @param idCpu The calling EMT. 2537 2518 * @param u64Arg What to query. 0 == registers. 2538 2519 */ 2539 VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, uint64_t u64Arg)2520 VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, VMCPUID idCpu, uint64_t u64Arg) 2540 2521 { 2541 2522 /* 2542 2523 * Resolve CPU structures. 
2543 2524 */ 2544 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);2525 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 2545 2526 if (RT_SUCCESS(rc)) 2546 2527 { … … 2548 2529 2549 2530 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 2550 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);2551 2531 if (u64Arg == 0) 2552 2532 { … … 2564 2544 pInput->VpIndex = pGVCpu->idCpu; 2565 2545 pInput->fFlags = 0; 2566 pInput->Names[0] = (HV_REGISTER_NAME)p VCpu->nem.s.Hypercall.Experiment.uItem;2546 pInput->Names[0] = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem; 2567 2547 2568 2548 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1), 2569 2549 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 2570 2550 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput); 2571 p VCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);2572 p VCpu->nem.s.Hypercall.Experiment.uStatus = uResult;2573 p VCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;2574 p VCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;2551 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1); 2552 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult; 2553 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64; 2554 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64; 2575 2555 rc = VINF_SUCCESS; 2576 2556 } … … 2588 2568 2589 2569 pInput->PartitionId = pGVM->nemr0.s.idHvPartition; 2590 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)p VCpu->nem.s.Hypercall.Experiment.uItem;2570 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pGVCpu->nem.s.Hypercall.Experiment.uItem; 2591 2571 pInput->uPadding = 0; 2592 2572 … … 2594 2574 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 2595 2575 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput); 2596 p VCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;2597 p VCpu->nem.s.Hypercall.Experiment.uStatus = uResult;2598 p VCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;2599 p VCpu->nem.s.Hypercall.Experiment.uHiValue = 0;2576 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS; 2577 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult; 2578 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue; 2579 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = 0; 2600 2580 rc = VINF_SUCCESS; 2601 2581 } … … 2612 2592 pInput->VpIndex = pGVCpu->idCpu; 2613 2593 pInput->RsvdZ = 0; 2614 pInput->Elements[0].Name = (HV_REGISTER_NAME)p VCpu->nem.s.Hypercall.Experiment.uItem;2615 pInput->Elements[0].Value.Reg128.High64 = p VCpu->nem.s.Hypercall.Experiment.uHiValue;2616 pInput->Elements[0].Value.Reg128.Low64 = p VCpu->nem.s.Hypercall.Experiment.uLoValue;2594 pInput->Elements[0].Name = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem; 2595 pInput->Elements[0].Value.Reg128.High64 = pGVCpu->nem.s.Hypercall.Experiment.uHiValue; 2596 pInput->Elements[0].Value.Reg128.Low64 = pGVCpu->nem.s.Hypercall.Experiment.uLoValue; 2617 2597 2618 2598 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1), 2619 2599 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0); 2620 p VCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);2621 p VCpu->nem.s.Hypercall.Experiment.uStatus = uResult;2600 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1); 2601 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult; 2622 2602 rc = VINF_SUCCESS; 2623 2603 } -
trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp
r80334 r80346
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
-static bool pdmR0IsaSetIrq(PVMCC pVM, int iIrq, int iLevel, uint32_t uTagSrc);
+static bool pdmR0IsaSetIrq(PGVM pGVM, int iIrq, int iLevel, uint32_t uTagSrc);

…
    LogFlow(("pdmR0DevHlp_PCISetIrq: caller=%p/%d: pPciDev=%p:{%#x} iIrq=%d iLevel=%d\n",
             pDevIns, pDevIns->iInstance, pPciDev, pPciDev->uDevFn, iIrq, iLevel));
-    PVMCC      pVM     = pDevIns->Internal.s.pVMR0;
+    PGVM       pGVM    = pDevIns->Internal.s.pVMR0;
    PPDMPCIBUS pPciBus = pPciDev->Int.s.pPdmBusR0;

-    pdmLock(pVM);
+    pdmLock(pGVM);
    uint32_t uTagSrc;
    if (iLevel & PDM_IRQ_LEVEL_HIGH)
    {
-        pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+        pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pGVM, pDevIns->idTracing);
        if (iLevel == PDM_IRQ_LEVEL_HIGH)
-            VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+            VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
        else
-            VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+            VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    }
    else
…
        pPciBus->pfnSetIrqR0(pPciBus->pDevInsR0, pPciDev, iIrq, iLevel, uTagSrc);

-        pdmUnlock(pVM);
+        pdmUnlock(pGVM);

        if (iLevel == PDM_IRQ_LEVEL_LOW)
-            VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+            VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    }
    else
    {
-        pdmUnlock(pVM);
+        pdmUnlock(pGVM);

        /* queue for ring-3 execution.
 */
-        PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM->pdm.s.pDevHlpQueueR0);
+        PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pGVM->pdm.s.pDevHlpQueueR0);
        AssertReturnVoid(pTask);
…
        pTask->u.PciSetIRQ.iLevel    = iLevel;
        pTask->u.PciSetIRQ.uTagSrc   = uTagSrc;
-        pTask->u.PciSetIRQ.pPciDevR3 = MMHyperR0ToR3(pVM, pPciDev);
-
-        PDMQueueInsertEx(pVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
+        pTask->u.PciSetIRQ.pPciDevR3 = MMHyperR0ToR3(pGVM, pPciDev);
+
+        PDMQueueInsertEx(pGVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
    }

…
    PDMDEV_ASSERT_DEVINS(pDevIns);
    LogFlow(("pdmR0DevHlp_ISASetIrq: caller=%p/%d: iIrq=%d iLevel=%d\n", pDevIns, pDevIns->iInstance, iIrq, iLevel));
-    PVMCC pVM = pDevIns->Internal.s.pVMR0;
-
-    pdmLock(pVM);
+    PGVM pGVM = pDevIns->Internal.s.pVMR0;
+
+    pdmLock(pGVM);
    uint32_t uTagSrc;
    if (iLevel & PDM_IRQ_LEVEL_HIGH)
    {
-        pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
+        pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pGVM, pDevIns->idTracing);
        if (iLevel == PDM_IRQ_LEVEL_HIGH)
-            VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+            VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
        else
-            VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+            VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    }
    else
        uTagSrc = pDevIns->Internal.s.uLastIrqTag;

-    bool fRc = pdmR0IsaSetIrq(pVM, iIrq, iLevel, uTagSrc);
+    bool fRc = pdmR0IsaSetIrq(pGVM, iIrq, iLevel, uTagSrc);

    if (iLevel == PDM_IRQ_LEVEL_LOW && fRc)
-        VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
-    pdmUnlock(pVM);
+        VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+    pdmUnlock(pGVM);
    LogFlow(("pdmR0DevHlp_ISASetIrq: caller=%p/%d: returns void; uTagSrc=%#x\n", pDevIns, pDevIns->iInstance, uTagSrc));
}
…
    PDMDEV_ASSERT_DEVINS(pDevIns);
    LogFlow(("pdmR0DevHlp_IoApicSendMsi: caller=%p/%d: GCPhys=%RGp uValue=%#x\n", pDevIns, pDevIns->iInstance, GCPhys, uValue));
-    PVMCC pVM = pDevIns->Internal.s.pVMR0;
+    PGVM pGVM = pDevIns->Internal.s.pVMR0;

    uint32_t uTagSrc;
-    pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
-    VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
-
-    if (pVM->pdm.s.IoApic.pDevInsR0)
-        pVM->pdm.s.IoApic.pfnSendMsiR0(pVM->pdm.s.IoApic.pDevInsR0, GCPhys, uValue, uTagSrc);
+    pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pGVM, pDevIns->idTracing);
+    VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
+
+    if (pGVM->pdm.s.IoApic.pDevInsR0)
+        pGVM->pdm.s.IoApic.pfnSendMsiR0(pGVM->pdm.s.IoApic.pDevInsR0, GCPhys, uValue, uTagSrc);
    else
        AssertFatalMsgFailed(("Lazy bastards!"));
…
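[Editor's note: a hedged recap of the IRQ-tagging idiom the helpers above share; a tracing tag is minted on the raising edge and reused on the falling edge.]

    uint32_t uTagSrc;
    if (iLevel & PDM_IRQ_LEVEL_HIGH)
        pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pGVM, pDevIns->idTracing);
    else
        uTagSrc = pDevIns->Internal.s.uLastIrqTag;   /* falling edge reuses the raising-edge tag */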
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
-    PVMCC pVM = pDevIns->Internal.s.pVMR0;
+    PGVM pGVM = pDevIns->Internal.s.pVMR0;
    LogFlow(("pdmR0IoApicHlp_ApicBusDeliver: caller=%p/%d: u8Dest=%RX8 u8DestMode=%RX8 u8DeliveryMode=%RX8 uVector=%RX8 u8Polarity=%RX8 u8TriggerMode=%RX8 uTagSrc=%#x\n",
             pDevIns, pDevIns->iInstance, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc));
-    return APICBusDeliver(pVM, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc);
+    return APICBusDeliver(pGVM, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc);
}
…
    PDMDEV_ASSERT_DEVINS(pDevIns);
    Log4(("pdmR0PciHlp_IsaSetIrq: iIrq=%d iLevel=%d uTagSrc=%#x\n", iIrq, iLevel, uTagSrc));
-    PVMCC pVM = pDevIns->Internal.s.pVMR0;
-
-    pdmLock(pVM);
-    pdmR0IsaSetIrq(pVM, iIrq, iLevel, uTagSrc);
-    pdmUnlock(pVM);
+    PGVM pGVM = pDevIns->Internal.s.pVMR0;
+
+    pdmLock(pGVM);
+    pdmR0IsaSetIrq(pGVM, iIrq, iLevel, uTagSrc);
+    pdmUnlock(pGVM);
}
…
    PDMDEV_ASSERT_DEVINS(pDevIns);
    Log4(("pdmR0PciHlp_IoApicSetIrq: iIrq=%d iLevel=%d uTagSrc=%#x\n", iIrq, iLevel, uTagSrc));
-    PVMCC pVM = pDevIns->Internal.s.pVMR0;
-
-    if (pVM->pdm.s.IoApic.pDevInsR0)
-        pVM->pdm.s.IoApic.pfnSetIrqR0(pVM->pdm.s.IoApic.pDevInsR0, iIrq, iLevel, uTagSrc);
-    else if (pVM->pdm.s.IoApic.pDevInsR3)
+    PGVM pGVM = pDevIns->Internal.s.pVMR0;
+
+    if (pGVM->pdm.s.IoApic.pDevInsR0)
+        pGVM->pdm.s.IoApic.pfnSetIrqR0(pGVM->pdm.s.IoApic.pDevInsR0, iIrq, iLevel, uTagSrc);
+    else if (pGVM->pdm.s.IoApic.pDevInsR3)
    {
        /* queue for ring-3 execution. */
-        PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM->pdm.s.pDevHlpQueueR0);
+        PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pGVM->pdm.s.pDevHlpQueueR0);
        if (pTask)
        {
…
            pTask->u.IoApicSetIRQ.uTagSrc = uTagSrc;

-            PDMQueueInsertEx(pVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
+            PDMQueueInsertEx(pGVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
        }
        else
…
    PDMDEV_ASSERT_DEVINS(pDevIns);
    Log4(("pdmR0PciHlp_IoApicSendMsi: GCPhys=%p uValue=%d uTagSrc=%#x\n", GCPhys, uValue, uTagSrc));
-    PVMCC pVM = pDevIns->Internal.s.pVMR0;
-    if (pVM->pdm.s.IoApic.pDevInsR0)
-        pVM->pdm.s.IoApic.pfnSendMsiR0(pVM->pdm.s.IoApic.pDevInsR0, GCPhys, uValue, uTagSrc);
+    PGVM pGVM = pDevIns->Internal.s.pVMR0;
+    if (pGVM->pdm.s.IoApic.pDevInsR0)
+        pGVM->pdm.s.IoApic.pfnSendMsiR0(pGVM->pdm.s.IoApic.pDevInsR0, GCPhys, uValue, uTagSrc);
    else
        AssertFatalMsgFailed(("Lazy bastards!"));
…
 *
 * @returns true if delivered, false if postponed.
- * @param   pVM     The cross context VM structure.
+ * @param   pGVM    The global (ring-0) VM structure.
 * @param   iIrq    The irq.
 * @param   iLevel  The new level.
…
 * @remarks The caller holds the PDM lock.
792 792 */ 793 static bool pdmR0IsaSetIrq(P VMCC pVM, int iIrq, int iLevel, uint32_t uTagSrc)794 { 795 if (RT_LIKELY( ( p VM->pdm.s.IoApic.pDevInsR0796 || !p VM->pdm.s.IoApic.pDevInsR3)797 && ( p VM->pdm.s.Pic.pDevInsR0798 || !p VM->pdm.s.Pic.pDevInsR3)))793 static bool pdmR0IsaSetIrq(PGVM pGVM, int iIrq, int iLevel, uint32_t uTagSrc) 794 { 795 if (RT_LIKELY( ( pGVM->pdm.s.IoApic.pDevInsR0 796 || !pGVM->pdm.s.IoApic.pDevInsR3) 797 && ( pGVM->pdm.s.Pic.pDevInsR0 798 || !pGVM->pdm.s.Pic.pDevInsR3))) 799 799 { 800 if (p VM->pdm.s.Pic.pDevInsR0)801 p VM->pdm.s.Pic.pfnSetIrqR0(pVM->pdm.s.Pic.pDevInsR0, iIrq, iLevel, uTagSrc);802 if (p VM->pdm.s.IoApic.pDevInsR0)803 p VM->pdm.s.IoApic.pfnSetIrqR0(pVM->pdm.s.IoApic.pDevInsR0, iIrq, iLevel, uTagSrc);800 if (pGVM->pdm.s.Pic.pDevInsR0) 801 pGVM->pdm.s.Pic.pfnSetIrqR0(pGVM->pdm.s.Pic.pDevInsR0, iIrq, iLevel, uTagSrc); 802 if (pGVM->pdm.s.IoApic.pDevInsR0) 803 pGVM->pdm.s.IoApic.pfnSetIrqR0(pGVM->pdm.s.IoApic.pDevInsR0, iIrq, iLevel, uTagSrc); 804 804 return true; 805 805 } 806 806 807 807 /* queue for ring-3 execution. */ 808 PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(p VM->pdm.s.pDevHlpQueueR0);808 PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pGVM->pdm.s.pDevHlpQueueR0); 809 809 AssertReturn(pTask, false); 810 810 … … 815 815 pTask->u.IsaSetIRQ.uTagSrc = uTagSrc; 816 816 817 PDMQueueInsertEx(p VM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);817 PDMQueueInsertEx(pGVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0); 818 818 return false; 819 819 } … … 825 825 * @returns See PFNPDMDEVREQHANDLERR0. 826 826 * @param pGVM The global (ring-0) VM structure. (For validation.) 827 * @param pVM The cross context VM structure. (For validation.)828 827 * @param pReq Pointer to the request buffer. 829 828 */ 830 VMMR0_INT_DECL(int) PDMR0DeviceCallReqHandler(PGVM pGVM, P VMCC pVM, PPDMDEVICECALLREQHANDLERREQ pReq)829 VMMR0_INT_DECL(int) PDMR0DeviceCallReqHandler(PGVM pGVM, PPDMDEVICECALLREQHANDLERREQ pReq) 831 830 { 832 831 /* 833 832 * Validate input and make the call. 834 833 */ 835 int rc = GVMMR0ValidateGVM andVM(pGVM, pVM);834 int rc = GVMMR0ValidateGVM(pGVM); 836 835 if (RT_SUCCESS(rc)) 837 836 { … … 841 840 PPDMDEVINS pDevIns = pReq->pDevInsR0; 842 841 AssertPtrReturn(pDevIns, VERR_INVALID_POINTER); 843 AssertReturn(pDevIns->Internal.s.pVMR0 == p VM, VERR_INVALID_PARAMETER);842 AssertReturn(pDevIns->Internal.s.pVMR0 == pGVM, VERR_INVALID_PARAMETER); 844 843 845 844 PFNPDMDEVREQHANDLERR0 pfnReqHandlerR0 = pReq->pfnReqHandlerR0; -
trunk/src/VBox/VMM/VMMR0/PDMR0Driver.cpp
r80333 r80346
 * @returns See PFNPDMDRVREQHANDLERR0.
 * @param   pGVM    The global (ring-0) VM structure. (For validation.)
- * @param   pVM     The cross context VM structure. (For validation.)
 * @param   pReq    Pointer to the request buffer.
 */
-VMMR0_INT_DECL(int) PDMR0DriverCallReqHandler(PGVM pGVM, PVMCC pVM, PPDMDRIVERCALLREQHANDLERREQ pReq)
+VMMR0_INT_DECL(int) PDMR0DriverCallReqHandler(PGVM pGVM, PPDMDRIVERCALLREQHANDLERREQ pReq)
{
    /*
     * Validate input and make the call.
     */
-    int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
+    int rc = GVMMR0ValidateGVM(pGVM);
    if (RT_SUCCESS(rc))
    {
…
        PPDMDRVINS pDrvIns = pReq->pDrvInsR0;
        AssertPtrReturn(pDrvIns, VERR_INVALID_POINTER);
-        AssertReturn(pDrvIns->Internal.s.pVMR0 == pVM, VERR_INVALID_PARAMETER);
+        AssertReturn(pDrvIns->Internal.s.pVMR0 == pGVM, VERR_INVALID_PARAMETER);

        PFNPDMDRVREQHANDLERR0 pfnReqHandlerR0 = pDrvIns->Internal.s.pfnReqHandlerR0;
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
r80334 r80346
 *
 * @param   pGVM    The global (ring-0) VM structure.
- * @param   pVM     The cross context VM structure.
 * @param   idCpu   The ID of the calling EMT.
 *
…
 *          must clear the new pages.
 */
-VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
+VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu)
{
    /*
…
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
    AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
-    PGM_LOCK_ASSERT_OWNER_EX(pVM, &pGVM->aCpus[idCpu]);
+    PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);

    /*
     * Check for error injection.
     */
-    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
+    if (RT_UNLIKELY(pGVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try allocate a full set of handy pages.
     */
-    uint32_t iFirst = pVM->pgm.s.cHandyPages;
-    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
-    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
+    uint32_t iFirst = pGVM->pgm.s.cHandyPages;
+    AssertReturn(iFirst <= RT_ELEMENTS(pGVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
+    uint32_t cPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
-    int rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
+    int rc = GMMR0AllocateHandyPages(pGVM, idCpu, cPages, cPages, &pGVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_STRICT
-        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
-        {
-            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
-            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
-            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
-            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
-            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
+        for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgm.s.aHandyPages); i++)
+        {
+            Assert(pGVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
+            Assert(pGVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
+            Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
+            Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
+            Assert(!(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }
#endif

-        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
+        pGVM->pgm.s.cHandyPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
…
        /* We're ASSUMING that GMM has updated all the entires before failing us.
 */
        uint32_t i;
-        for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
+        for (i = iFirst; i < RT_ELEMENTS(pGVM->pgm.s.aHandyPages); i++)
        {
-            Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
-            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
-            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
+            Assert(pGVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
+            Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
+            Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
        }
#endif
…
            if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                cPages = PGM_HANDY_PAGES_MIN - iFirst;
-            rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
+            rc = GMMR0AllocateHandyPages(pGVM, idCpu, 0, cPages, &pGVM->pgm.s.aHandyPages[iFirst]);
        } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                     || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
…
            while (i-- > 0)
            {
-                Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
-                Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
-                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
-                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
-                Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
+                Assert(pGVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
+                Assert(pGVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
+                Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
+                Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
+                Assert(!(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
            }

-            for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
+            for (i = cPages + iFirst; i < RT_ELEMENTS(pGVM->pgm.s.aHandyPages); i++)
            {
-                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
-                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
-                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
+                Assert(pGVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
+                Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
+                Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

-            pVM->pgm.s.cHandyPages = iFirst + cPages;
+            pGVM->pgm.s.cHandyPages = iFirst + cPages;
        }
    }
…
    {
        LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
-        VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
+        VM_FF_SET(pGVM, VM_FF_PGM_NO_MEMORY);
    }
…
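[Editor's note: a hedged condensation of the two-step allocation strategy in PGMR0PhysAllocateHandyPages above; ask for a full refill first, then settle for the minimum the pool needs to stay functional.]

    uint32_t cPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages) - iFirst;
    int rc = GMMR0AllocateHandyPages(pGVM, idCpu, cPages, cPages, &pGVM->pgm.s.aHandyPages[iFirst]);
    if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
    {
        if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
            cPages = PGM_HANDY_PAGES_MIN - iFirst;   /* fall back to the bare minimum */
        rc = GMMR0AllocateHandyPages(pGVM, idCpu, 0, cPages, &pGVM->pgm.s.aHandyPages[iFirst]);
    }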
207 205 */ 208 uint32_t iFirst = p VM->pgm.s.cHandyPages;209 AssertReturn(iFirst <= RT_ELEMENTS(p VM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);210 uint32_t cPages = RT_ELEMENTS(p VM->pgm.s.aHandyPages) - iFirst;206 uint32_t iFirst = pGVM->pgm.s.cHandyPages; 207 AssertReturn(iFirst <= RT_ELEMENTS(pGVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE); 208 uint32_t cPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages) - iFirst; 211 209 if (!cPages) 212 210 return VINF_SUCCESS; 213 int rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, cPages, 0, &pVM->pgm.s.aHandyPages[iFirst]);211 int rc = GMMR0AllocateHandyPages(pGVM, idCpu, cPages, 0, &pGVM->pgm.s.aHandyPages[iFirst]); 214 212 215 213 LogFlow(("PGMR0PhysFlushHandyPages: cPages=%d rc=%Rrc\n", cPages, rc)); … … 226 224 * 227 225 * @param pGVM The global (ring-0) VM structure. 228 * @param pVM The cross context VM structure.229 226 * @param idCpu The ID of the calling EMT. 230 227 * … … 234 231 * must clear the new pages. 235 232 */ 236 VMMR0_INT_DECL(int) PGMR0PhysAllocateLargeHandyPage(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)233 VMMR0_INT_DECL(int) PGMR0PhysAllocateLargeHandyPage(PGVM pGVM, VMCPUID idCpu) 237 234 { 238 235 /* … … 241 238 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */ 242 239 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER); 243 PGM_LOCK_ASSERT_OWNER_EX(p VM, &pGVM->aCpus[idCpu]);244 Assert(!p VM->pgm.s.cLargeHandyPages);240 PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]); 241 Assert(!pGVM->pgm.s.cLargeHandyPages); 245 242 246 243 /* 247 244 * Do the job. 248 245 */ 249 int rc = GMMR0AllocateLargePage(pGVM, pVM,idCpu, _2M,250 &p VM->pgm.s.aLargeHandyPage[0].idPage,251 &p VM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);246 int rc = GMMR0AllocateLargePage(pGVM, idCpu, _2M, 247 &pGVM->pgm.s.aLargeHandyPage[0].idPage, 248 &pGVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys); 252 249 if (RT_SUCCESS(rc)) 253 p VM->pgm.s.cLargeHandyPages = 1;250 pGVM->pgm.s.cLargeHandyPages = 1; 254 251 255 252 return rc; … … 383 380 * 384 381 * @param pGVM The global (ring-0) VM structure. 385 * @param pVM The cross context VM structure. 386 */ 387 VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM, PVMCC pVM) 388 { 389 int rc = GVMMR0ValidateGVMandVM(pGVM, pVM); 382 */ 383 VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM) 384 { 385 int rc = GVMMR0ValidateGVM(pGVM); 390 386 if (RT_FAILURE(rc)) 391 387 return rc; 392 388 393 389 #ifdef VBOX_WITH_PCI_PASSTHROUGH 394 if (p VM->pgm.s.fPciPassthrough)390 if (pGVM->pgm.s.fPciPassthrough) 395 391 { 396 392 /* … … 398 394 * IOMMU about each of them. 399 395 */ 400 pgmLock(p VM);396 pgmLock(pGVM); 401 397 rc = GPciRawR0GuestPageBeginAssignments(pGVM); 402 398 if (RT_SUCCESS(rc)) 403 399 { 404 for (PPGMRAMRANGE pRam = p VM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)400 for (PPGMRAMRANGE pRam = pGVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0) 405 401 { 406 402 PPGMPAGE pPage = &pRam->aPages[0]; … … 427 423 rc = rc2; 428 424 } 429 pgmUnlock(p VM);425 pgmUnlock(pGVM); 430 426 } 431 427 else … … 440 436 * 441 437 * @returns VBox status code (appropriate for trap handling and GC return). 442 * @param pVM The cross context VM structure. 443 * @param pVCpu The cross context virtual CPU structure. 438 * @param pGVM The global (ring-0) VM structure. 439 * @param pGVCpu The global (ring-0) CPU structure of the calling 440 * EMT. 444 441 * @param enmShwPagingMode Paging mode for the nested page tables. 
 *
 * @param   pGVM    The global (ring-0) VM structure.
-* @param   pVM     The cross context VM structure.
 */
-VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM, PVMCC pVM)
+VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM)
 {
-    int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
+    int rc = GVMMR0ValidateGVM(pGVM);
     if (RT_FAILURE(rc))
         return rc;

 #ifdef VBOX_WITH_PCI_PASSTHROUGH
-    if (pVM->pgm.s.fPciPassthrough)
+    if (pGVM->pgm.s.fPciPassthrough)
     {
         /*
…
          * IOMMU about each of them.
          */
-        pgmLock(pVM);
+        pgmLock(pGVM);
         rc = GPciRawR0GuestPageBeginAssignments(pGVM);
         if (RT_SUCCESS(rc))
         {
-            for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
+            for (PPGMRAMRANGE pRam = pGVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
             {
                 PPGMPAGE pPage = &pRam->aPages[0];
…
                 rc = rc2;
         }
-        pgmUnlock(pVM);
+        pgmUnlock(pGVM);
     }
     else
…
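[Editor's note: a toy user-mode model, not the real PGM code, of the walk above: iterate a linked list of RAM ranges, "assign" each page to an IOMMU, and stop on the first failure while still letting the closing bracket call merge its status. All types and helpers are invented.]

    #include <stdio.h>

    typedef struct RamRange { const char *pszName; unsigned cPages; struct RamRange *pNext; } RamRange;

    static int iommuAssign(const RamRange *pRam, unsigned iPage) { (void)pRam; (void)iPage; return 0; }

    int main(void)
    {
        RamRange high = { "high", 4, NULL };
        RamRange low  = { "low",  2, &high };

        int rc = 0;                                   /* begin-assignments status would be checked here */
        for (RamRange *pRam = &low; rc == 0 && pRam; pRam = pRam->pNext)
            for (unsigned iPage = 0; rc == 0 && iPage < pRam->cPages; iPage++)
                rc = iommuAssign(pRam, iPage);
        /* end-assignments must run even on failure, like the rc2 merge above */
        printf("rc=%d\n", rc);
        return rc;
    }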
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
-* @param   pVM               The cross context VM structure.
-* @param   pVCpu             The cross context virtual CPU structure.
+* @param   pGVM              The global (ring-0) VM structure.
+* @param   pGVCpu            The global (ring-0) CPU structure of the calling
+*                            EMT.
 * @param   enmShwPagingMode  Paging mode for the nested page tables.
 * @param   uErr              The trap error code.
…
 * @param   GCPhysFault       The fault address.
 */
-VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
+VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                               PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
 {
…
     LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
-    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
+    STAM_PROFILE_START(&pGVCpu->pgm.s.StatRZTrap0e, a);
+    STAM_STATS({ pGVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

     /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
…
     {
         if (uErr & X86_TRAP_PF_RW)
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
+            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
         else
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
+            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
     }
     else if (uErr & X86_TRAP_PF_RW)
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
+        STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
     else if (uErr & X86_TRAP_PF_RSVD)
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
+        STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
     else if (uErr & X86_TRAP_PF_ID)
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
+        STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
     else
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
+        STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
     }
     else
…
     {
         if (uErr & X86_TRAP_PF_RW)
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
+            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
         else
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
+            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
     }
     else if (uErr & X86_TRAP_PF_RW)
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
+        STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
     else if (uErr & X86_TRAP_PF_ID)
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
+        STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
     else if (uErr & X86_TRAP_PF_RSVD)
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
+        STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
     }
 #endif
…
     {
         case PGMMODE_32_BIT:
-            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
+            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
             break;
         case PGMMODE_PAE:
         case PGMMODE_PAE_NX:
-            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
+            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
             break;
         case PGMMODE_AMD64:
         case PGMMODE_AMD64_NX:
-            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
+            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
             break;
         case PGMMODE_EPT:
-            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
+            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
             break;
         default:
…
     if (fLockTaken)
     {
-        PGM_LOCK_ASSERT_OWNER(pVM);
-        pgmUnlock(pVM);
+        PGM_LOCK_ASSERT_OWNER(pGVM);
+        pgmUnlock(pGVM);
     }

…
     }

-    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
-                     pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
-    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
+    STAM_STATS({ if (!pGVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
+                     pGVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
+    STAM_PROFILE_STOP_EX(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pGVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
     return rc;
 }
…
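[Editor's note: a standalone sketch of the error-code decoding behind the statistics above. The flag values are the x86 #PF error-code bits; the classify() helper itself is only illustrative.]

    #include <stdio.h>

    #define TRAP_PF_P    0x01  /* page present */
    #define TRAP_PF_RW   0x02  /* write access */
    #define TRAP_PF_US   0x04  /* user mode */
    #define TRAP_PF_RSVD 0x08  /* reserved bit set in paging structures */
    #define TRAP_PF_ID   0x10  /* instruction fetch */

    static const char *classify(unsigned uErr)
    {
        if (!(uErr & TRAP_PF_P))
            return uErr & TRAP_PF_RW ? "not-present write" : "not-present read";
        if (uErr & TRAP_PF_RW)   return "protection write";
        if (uErr & TRAP_PF_RSVD) return "reserved bits";
        if (uErr & TRAP_PF_ID)   return "no-execute";
        return "protection read";
    }

    int main(void)
    {
        printf("%s\n", classify(TRAP_PF_US | TRAP_PF_RW));   /* not-present write */
        printf("%s\n", classify(TRAP_PF_P | TRAP_PF_ID));    /* no-execute */
        return 0;
    }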
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
-* @param   pVM               The cross context VM structure.
-* @param   pVCpu             The cross context virtual CPU structure.
+* @param   pGVM              The global (ring-0) VM structure.
+* @param   pGVCpu            The global (ring-0) CPU structure of the calling
+*                            EMT.
 * @param   enmShwPagingMode  Paging mode for the nested page tables.
 * @param   pRegFrame         Trap register frame.
…
 *                            (VT-x).
 */
-VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmShwPagingMode,
+VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode,
                                                       PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
 {
 #ifdef PGM_WITH_MMIO_OPTIMIZATIONS
-    STAM_PROFILE_START(&pVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
+    STAM_PROFILE_START(&pGVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
     VBOXSTRICTRC rc;

…
      * Try lookup the all access physical handler for the address.
      */
-    pgmLock(pVM);
-    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
-    PPGMPHYSHANDLERTYPEINT pHandlerType = RT_LIKELY(pHandler) ? PGMPHYSHANDLER_GET_TYPE(pVM, pHandler) : NULL;
+    pgmLock(pGVM);
+    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pGVM, GCPhysFault);
+    PPGMPHYSHANDLERTYPEINT pHandlerType = RT_LIKELY(pHandler) ? PGMPHYSHANDLER_GET_TYPE(pGVM, pHandler) : NULL;
     if (RT_LIKELY(pHandler && pHandlerType->enmKind != PGMPHYSHANDLERKIND_WRITE))
     {
…
         if (   (   pHandler->cAliasedPages
                 || pHandler->cTmpOffPages)
-            && (   (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
+            && (   (pPage = pgmPhysGetPage(pGVM, GCPhysFault)) == NULL
                 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
            )
         {
             Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
-            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
-            rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
-            pgmUnlock(pVM);
+            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
+            rc = pgmShwSyncNestedPageLocked(pGVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
+            pgmUnlock(pGVM);
         }
         else
…
             void *pvUser = pHandler->CTX_SUFF(pvUser);
             STAM_PROFILE_START(&pHandler->Stat, h);
-            pgmUnlock(pVM);
+            pgmUnlock(pGVM);

             Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pHandlerType->CTX_SUFF(pfnPfHandler), uErr, GCPhysFault, pvUser));
-            rc = pHandlerType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame,
+            rc = pHandlerType->CTX_SUFF(pfnPfHandler)(pGVM, pGVCpu, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame,
                                                       GCPhysFault, GCPhysFault, pvUser);

 #ifdef VBOX_WITH_STATISTICS
-            pgmLock(pVM);
-            pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
+            pgmLock(pGVM);
+            pHandler = pgmHandlerPhysicalLookup(pGVM, GCPhysFault);
             if (pHandler)
                 STAM_PROFILE_STOP(&pHandler->Stat, h);
-            pgmUnlock(pVM);
+            pgmUnlock(pGVM);
 #endif
         }
         else
         {
-            pgmUnlock(pVM);
+            pgmUnlock(pGVM);
             Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
             rc = VINF_EM_RAW_EMULATE_INSTR;
…
          */
         Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
-        rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
-        pgmUnlock(pVM);
+        STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
+        rc = pgmShwSyncNestedPageLocked(pGVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
+        pgmUnlock(pGVM);
     }

-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
+    STAM_PROFILE_STOP(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
     return rc;
…
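[Editor's note: a toy illustration, with invented types rather than PGM's, of the locking pattern in the handler path above: look the handler up while holding the lock, snapshot what is needed, drop the lock, then invoke the callback outside it.]

    #include <pthread.h>
    #include <stdio.h>

    typedef int (*PFNHANDLER)(void *pvUser);

    typedef struct { PFNHANDLER pfn; void *pvUser; } Handler;

    static pthread_mutex_t g_Lock = PTHREAD_MUTEX_INITIALIZER;
    static Handler g_Handler;

    static int demoHandler(void *pvUser) { printf("handler(%p)\n", pvUser); return 0; }

    int main(void)
    {
        g_Handler.pfn = demoHandler;

        pthread_mutex_lock(&g_Lock);
        PFNHANDLER pfn    = g_Handler.pfn;     /* snapshot under the lock */
        void      *pvUser = g_Handler.pvUser;
        pthread_mutex_unlock(&g_Lock);

        return pfn ? pfn(pvUser) : -1;         /* call with the lock dropped */
    }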
-
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r80334 r80346
         } \
     } while (0)
-# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
+# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
     do { \
         if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
…
             if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
             { /* likely */ } \
+            else if (a_pGVM) \
+            { \
+                SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
+                RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
+                            "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
+                a_BadExpr; \
+            } \
             else \
             { \
-                SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
-                RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
-                            "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
+                SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
                 a_BadExpr; \
             } \
…
     } while (0)
 #else
-# define VMM_CHECK_SMAP_SETUP()                  uint32_t const fKernelFeatures = 0
-# define VMM_CHECK_SMAP_CHECK(a_BadExpr)         NOREF(fKernelFeatures)
-# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
+# define VMM_CHECK_SMAP_SETUP()                   uint32_t const fKernelFeatures = 0
+# define VMM_CHECK_SMAP_CHECK(a_BadExpr)          NOREF(fKernelFeatures)
+# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
 #endif
…
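[Editor's note: a hedged sketch of the check the macro performs: read EFLAGS and verify that the AC bit (bit 18) is still set. This uses x86-64 GCC/Clang inline assembly to illustrate the idea; it is not the IPRT ASMGetFlags() implementation.]

    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFL_AC  UINT64_C(0x40000)   /* alignment check / SMAP access flag, bit 18 */

    static inline uint64_t getEflags(void)
    {
        uint64_t uFlags;
        __asm__ __volatile__("pushfq; popq %0" : "=r"(uFlags));
        return uFlags;
    }

    int main(void)
    {
        if (getEflags() & X86_EFL_AC)
            printf("EFLAGS.AC set - kernel code may touch user pages under SMAP\n");
        else
            printf("EFLAGS.AC clear - an access would fault\n");
        return 0;
    }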
offScratch=%d\n", pR0Logger->Logger.offScratch)); 422 RTLogSetDefaultInstanceThread(NULL, p VM->pSession);426 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession); 423 427 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance())); 424 428 … … 426 430 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch)); 427 431 428 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)p VM->pSession);432 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession); 429 433 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n"); 430 434 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch)); 431 435 # endif 432 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, p VM->pSession));433 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)p VM->pSession);436 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession)); 437 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession); 434 438 pR0Logger->fRegistered = true; 435 439 } … … 439 443 * Check if the host supports high resolution timers or not. 440 444 */ 441 if ( p VM->vmm.s.fUsePeriodicPreemptionTimers445 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers 442 446 && !RTTimerCanDoHighResolution()) 443 p VM->vmm.s.fUsePeriodicPreemptionTimers = false;447 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false; 444 448 445 449 /* 446 450 * Initialize the per VM data for GVMM and GMM. 447 451 */ 448 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);452 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 449 453 rc = GVMMR0InitVM(pGVM); 450 // if (RT_SUCCESS(rc))451 // rc = GMMR0InitPerVMData(pVM);452 454 if (RT_SUCCESS(rc)) 453 455 { … … 455 457 * Init HM, CPUM and PGM (Darwin only). 456 458 */ 457 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);458 rc = HMR0InitVM(p VM);459 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 460 rc = HMR0InitVM(pGVM); 459 461 if (RT_SUCCESS(rc)) 460 VMM_CHECK_SMAP_CHECK2(p VM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */462 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */ 461 463 if (RT_SUCCESS(rc)) 462 464 { 463 rc = CPUMR0InitVM(p VM);465 rc = CPUMR0InitVM(pGVM); 464 466 if (RT_SUCCESS(rc)) 465 467 { 466 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);468 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 467 469 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 468 rc = PGMR0DynMapInitVM(p VM);470 rc = PGMR0DynMapInitVM(pGVM); 469 471 #endif 470 472 if (RT_SUCCESS(rc)) 471 473 { 472 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);474 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 473 475 rc = EMR0InitVM(pGVM); 474 476 if (RT_SUCCESS(rc)) 475 477 { 476 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);478 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 477 479 #ifdef VBOX_WITH_PCI_PASSTHROUGH 478 rc = PciRawR0InitVM(pGVM , pVM);480 rc = PciRawR0InitVM(pGVM); 479 481 #endif 480 482 if (RT_SUCCESS(rc)) 481 483 { 482 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);483 rc = GIMR0InitVM(p VM);484 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 485 rc = GIMR0InitVM(pGVM); 484 486 if (RT_SUCCESS(rc)) 485 487 { 486 VMM_CHECK_SMAP_CHECK2(p VM, rc = VERR_VMM_RING0_ASSERTION);488 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); 487 489 if (RT_SUCCESS(rc)) 488 490 { … … 492 494 * Collect a bit of info for the VM release log. 
 /**
…
  * @returns VBox status code.
  * @param   pGVM    The ring-0 VM structure.
- * @param   pVM     The cross context VM structure.
  * @param   idCpu   The EMT that's calling.
  */
-static int vmmR0InitVMEmt(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
+static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
 {
     /* Paranoia (caller checked these already). */
…
         && !pR0Logger->fRegistered)
     {
-        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
+        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
         pR0Logger->fRegistered = true;
     }
 #endif
-    RT_NOREF(pVM);

     return VINF_SUCCESS;
…
  *
  * @param   pGVM    The global (ring-0) VM structure.
- * @param   pVM     The cross context VM structure.
  * @param   idCpu   Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
  *                  thread.
  * @thread  EMT(0) or session clean up thread.
  */
-VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
+VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
 {
     /*
…
     {
         AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
-        int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+        int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
         if (RT_FAILURE(rc))
             return rc;
     }

 #ifdef VBOX_WITH_PCI_PASSTHROUGH
-    PciRawR0TermVM(pGVM, pVM);
+    PciRawR0TermVM(pGVM);
 #endif

…
     if (GVMMR0DoingTermVM(pGVM))
     {
-        GIMR0TermVM(pVM);
-
-        /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
+        GIMR0TermVM(pGVM);
+
+        /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
          *        here to make sure we don't leak any shared pages if we crash... */
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-        PGMR0DynMapTermVM(pVM);
+        PGMR0DynMapTermVM(pGVM);
 #endif
-        HMR0TermVM(pVM);
+        HMR0TermVM(pGVM);
     }

     /*
      * Deregister the logger.
      */
-    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
+    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
     return VINF_SUCCESS;
 }
…
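[Editor's note: vmmR0InitVMEmt's main job above is binding a per-EMT logger to the calling thread, keyed by the session. A rough user-mode analogue using thread-local storage; the Logger type and key handling are simplified stand-ins, not the IPRT API.]

    #include <pthread.h>
    #include <stdio.h>

    typedef struct { const char *pszName; } Logger;

    static pthread_key_t g_LoggerKey;

    static void *emt(void *pvLogger)
    {
        pthread_setspecific(g_LoggerKey, pvLogger);          /* register for this thread */
        Logger *pLogger = pthread_getspecific(g_LoggerKey);  /* later lookups find it */
        printf("EMT logger: %s\n", pLogger->pszName);
        return NULL;
    }

    int main(void)
    {
        pthread_key_create(&g_LoggerKey, NULL);
        Logger Vcpu0Logger = { "vcpu0" };
        pthread_t hThread;
        pthread_create(&hThread, NULL, emt, &Vcpu0Logger);
        pthread_join(hThread, NULL);
        return 0;
    }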
 * @returns VINF_SUCCESS or VINF_EM_HALT.
 * @param   pGVM    The ring-0 VM structure.
-* @param   pVM     The cross context VM structure.
 * @param   pGVCpu  The ring-0 virtual CPU structure.
-* @param   pVCpu   The cross context virtual CPU structure.
 *
 * @todo r=bird: All the blocking/waiting and EMT managment should move out of
…
 *       parameters and statistics.
 */
-static int vmmR0DoHalt(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu)
-{
-    Assert(pVCpu == pGVCpu);
-
+static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
+{
     /*
      * Do spin stat historization.
      */
-    if (++pVCpu->vmm.s.cR0Halts & 0xff)
+    if (++pGVCpu->vmm.s.cR0Halts & 0xff)
     { /* likely */ }
-    else if (pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3)
-    {
-        pVCpu->vmm.s.cR0HaltsSucceeded = 2;
-        pVCpu->vmm.s.cR0HaltsToRing3   = 0;
+    else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
+    {
+        pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
+        pGVCpu->vmm.s.cR0HaltsToRing3   = 0;
     }
     else
     {
-        pVCpu->vmm.s.cR0HaltsSucceeded = 0;
-        pVCpu->vmm.s.cR0HaltsToRing3   = 2;
+        pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
+        pGVCpu->vmm.s.cR0HaltsToRing3   = 2;
     }

…
     * Check preconditions.
     */
-    unsigned const             uMWait              = EMMonitorWaitIsActive(pVCpu);
-    CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pVCpu);
-    if (   pVCpu->vmm.s.fMayHaltInRing0
-        && !TRPMHasTrap(pVCpu)
+    unsigned const             uMWait              = EMMonitorWaitIsActive(pGVCpu);
+    CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
+    if (   pGVCpu->vmm.s.fMayHaltInRing0
+        && !TRPMHasTrap(pGVCpu)
         && (   enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
             || uMWait > 1))
     {
-        if (   !VM_FF_IS_ANY_SET(pVM, fVmFFs)
-            && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
+        if (   !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
+            && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
         {
             /*
              * Interrupts pending already?
              */
-            if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
-                APICUpdatePendingInterrupts(pVCpu);
+            if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
+                APICUpdatePendingInterrupts(pGVCpu);

             /*
…
                                         | VMCPU_FF_INTERRUPT_NMI  | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;

-            if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
-                return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
+            if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
+                return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
             ASMNopPause();

             /*
…
              */
             uint64_t u64Delta;
-            uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
-
-            if (   !VM_FF_IS_ANY_SET(pVM, fVmFFs)
-                && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
+            uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
+
+            if (   !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
+                && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
             {
-                if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
-                    APICUpdatePendingInterrupts(pVCpu);
-
-                if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
-                    return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
+                if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
+                    APICUpdatePendingInterrupts(pGVCpu);
+
+                if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
+                    return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);

                 /*
                  * Wait if there is enough time to the next timer event.
                  */
-                if (u64Delta >= pVCpu->vmm.s.cNsSpinBlockThreshold)
+                if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
                 {
                     /* If there are few other CPU cores around, we will procrastinate a
…
                        dynamically adjust the spin count according to its usfulness or
                        something... */
-                    if (   pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3
+                    if (   pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
                         && RTMpGetOnlineCount() >= 4)
                     {
…
                         {
                             ASMNopPause();
-                            if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
-                                APICUpdatePendingInterrupts(pVCpu);
+                            if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
+                                APICUpdatePendingInterrupts(pGVCpu);
                             ASMNopPause();
-                            if (VM_FF_IS_ANY_SET(pVM, fVmFFs))
+                            if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
                             {
-                                STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
+                                STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
                                 return VINF_EM_HALT;
                             }
                             ASMNopPause();
-                            if (VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
+                            if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
                             {
-                                STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
+                                STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
                                 return VINF_EM_HALT;
                             }
                             ASMNopPause();
-                            if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
+                            if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
                             {
-                                STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromSpin);
-                                return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
+                                STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
+                                return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
                             }
                             ASMNopPause();
…
                     /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
                        knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
-                    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
+                    VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
                     uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
-                    int rc = GVMMR0SchedHalt(pGVM, pVM, pGVCpu, u64GipTime);
+                    int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
                     uint64_t const u64EndSchedHalt     = RTTimeNanoTS();
                     uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
-                    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
-                    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
+                    VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
+                    STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
                     if (   rc == VINF_SUCCESS
                         || rc == VERR_INTERRUPTED)
…
                         int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
                         if (cNsOverslept > 50000)
-                            STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
+                            STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
                         else if (cNsOverslept < -50000)
-                            STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
+                            STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
                         else
-                            STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
+                            STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);

                         /*
                          * Recheck whether we can resume execution or have to go to ring-3.
                          */
-                        if (   !VM_FF_IS_ANY_SET(pVM, fVmFFs)
-                            && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
+                        if (   !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
+                            && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
                         {
-                            if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
-                                APICUpdatePendingInterrupts(pVCpu);
-                            if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
+                            if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
+                                APICUpdatePendingInterrupts(pGVCpu);
+                            if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
                             {
-                                STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromBlock);
-                                return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
+                                STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
+                                return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
                             }
                         }
…
 /**
  * Record return code statistics
- * @param   pVM     The cross context VM structure.
+ * @param   pGVM    The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   rc      The status code.
…
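[Editor's note: the halt path above keeps two saturating counters per vCPU, rebalances them every 256 halts, and buckets each wakeup against the GIP deadline using +/-50us windows. A standalone model of both decisions; the thresholds come from the diff, the driver code is invented.]

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { unsigned cR0Halts, cR0HaltsSucceeded, cR0HaltsToRing3; } HaltStats;

    static void historize(HaltStats *p)
    {
        if (++p->cR0Halts & 0xff)                  /* only every 256th halt */
            return;
        if (p->cR0HaltsSucceeded > p->cR0HaltsToRing3)
        {   p->cR0HaltsSucceeded = 2; p->cR0HaltsToRing3 = 0; }  /* keep halting in ring-0 */
        else
        {   p->cR0HaltsSucceeded = 0; p->cR0HaltsToRing3 = 2; }  /* bias towards ring-3 */
    }

    static const char *wakeupBucket(int64_t cNsOverslept)
    {
        if (cNsOverslept >  50000) return "overslept";
        if (cNsOverslept < -50000) return "insomnia";   /* woke before the deadline */
        return "on time";
    }

    int main(void)
    {
        HaltStats s = { 255, 10, 3 };
        historize(&s);
        printf("succeeded=%u toRing3=%u, wakeup: %s\n",
               s.cR0HaltsSucceeded, s.cR0HaltsToRing3, wakeupBucket(70000));
        return 0;
    }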
 *
 * @param   pGVM            The global (ring-0) VM structure.
-* @param   pVM             The cross context VM structure.
-*                          The return code is stored in pVM->vmm.s.iLastGZRc.
+* @param   pVMIgnored      The cross context VM structure.  The return code is
+*                          stored in pVM->vmm.s.iLastGZRc.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
-VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
-{
+VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
+{
+    RT_NOREF(pVMIgnored);
+
     /*
      * Validation.
      */
     if (   idCpu < pGVM->cCpus
-        && pGVM->cCpus == pVM->cCpus)
+        && pGVM->cCpus == pGVM->cCpusUnsafe)
     { /*likely*/ }
     else
     {
-        SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
+        SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
         return;
     }

-    PGVMCPU  pGVCpu = &pGVM->aCpus[idCpu];
-    PVMCPUCC pVCpu  = pGVCpu;
+    PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
     RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
-    if (RT_LIKELY(   pGVCpu->hEMT           == hNativeThread
-                  && pVCpu->hNativeThreadR0 == hNativeThread))
+    if (RT_LIKELY(   pGVCpu->hEMT            == hNativeThread
+                  && pGVCpu->hNativeThreadR0 == hNativeThread))
     { /* likely */ }
     else
     {
-        SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
-                    idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
+        SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
+                    idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
         return;
     }

     /*
…
      */
     VMM_CHECK_SMAP_SETUP();
-    VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);

     /*
…
              * Disable preemption.
              */
-            Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
+            Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
             RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
             RTThreadPreemptDisable(&PreemptState);
…
                       && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
             {
-                pVCpu->iHostCpuSet = iHostCpuSet;
-                ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
+                pGVCpu->iHostCpuSet = iHostCpuSet;
+                ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);

                 /*
                  * Update the periodic preemption timer if it's active.
                  */
-                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
-                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
-                VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+                if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
+                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
+                VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);

 #ifdef VMM_R0_TOUCH_FPU
…
                 /*
                  * Enable the context switching hook.
                  */
-                if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+                if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
                 {
-                    Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
-                    int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
+                    Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
+                    int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
                 }

                 /*
                  * Enter HM context.
                  */
-                rc = HMR0Enter(pVCpu);
+                rc = HMR0Enter(pGVCpu);
                 if (RT_SUCCESS(rc))
                 {
-                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+                    VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);

                     /*
…
                      * we're in HM context.
                      */
-                    if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
+                    if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
                     {
                         fPreemptRestored = true;
…
                     /*
                      * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
                      */
-                    VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
-                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
-                    VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+                    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
+                    rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
+                    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);

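[Editor's note: the ring-3 call machinery referenced above is essentially setjmp/longjmp: the inner function runs under a jump buffer so a deeply nested callee can bail straight back to the entry point. A plain user-mode model of the pattern; the real vmmR0CallRing3SetJmp is assembly and the names below are invented.]

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf g_JmpBuf;

    static void deepCallee(void)
    {
        longjmp(g_JmpBuf, 42);        /* like a ring-3 call bailing out */
    }

    static int runGuestCode(void)
    {
        deepCallee();
        return 0;                     /* never reached in this demo */
    }

    int main(void)
    {
        int rc = setjmp(g_JmpBuf);
        if (rc == 0)
            rc = runGuestCode();      /* normal path */
        printf("returned rc=%d\n", rc);   /* 42, via the long jump */
        return 0;
    }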
                     /*
…
                      * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
                      */
-                    if (RT_UNLIKELY(   VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
+                    if (RT_UNLIKELY(   VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
                                     && RT_SUCCESS_NP(rc)  &&  rc != VINF_VMM_CALL_HOST ))
                     {
-                        pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
-                        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
-                                    "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
+                        pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
+                        RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
+                                    "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
                         rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
                     }
                     /** @todo Get rid of this. HM shouldn't disable the context hook. */
-                    else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
+                    else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
                     {
-                        pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
-                        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
-                                    "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
+                        pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
+                        RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
+                                    "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
                         rc = VERR_INVALID_STATE;
                     }

-                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+                    VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
                 }
-                STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
+                STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);

                 /*
…
                  * hook / restore preemption.
                  */
-                pVCpu->iHostCpuSet = UINT32_MAX;
-                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
+                pGVCpu->iHostCpuSet = UINT32_MAX;
+                ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);

                 /*
…
                  * when we get here, but the IPRT API handles that.
                  */
-                if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+                if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
                 {
-                    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
-                    RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
+                    ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
+                    RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
                 }
             }
…
             {
                 rc = VINF_EM_RAW_INTERRUPT;
-                pVCpu->iHostCpuSet = UINT32_MAX;
-                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
+                pGVCpu->iHostCpuSet = UINT32_MAX;
+                ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
             }

…
             RTThreadPreemptRestore(&PreemptState);

-            pVCpu->vmm.s.iLastGZRc = rc;
+            pGVCpu->vmm.s.iLastGZRc = rc;

             /* Fire dtrace probe and collect statistics. */
-            VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
+            VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
 #ifdef VBOX_WITH_STATISTICS
-            vmmR0RecordRC(pVM, pVCpu, rc);
+            vmmR0RecordRC(pGVM, pGVCpu, rc);
 #endif
 #if 1
…
             else
             {
-                pVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu);
+                pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
                 if (rc == VINF_SUCCESS)
                 {
-                    pVCpu->vmm.s.cR0HaltsSucceeded++;
+                    pGVCpu->vmm.s.cR0HaltsSucceeded++;
                     continue;
                 }
-                pVCpu->vmm.s.cR0HaltsToRing3++;
+                pGVCpu->vmm.s.cR0HaltsToRing3++;
             }
 #endif
…
             else
             {
-                pVCpu->iHostCpuSet = UINT32_MAX;
-                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
+                pGVCpu->iHostCpuSet = UINT32_MAX;
+                ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
                 RTThreadPreemptRestore(&PreemptState);
                 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
                 {
-                    int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
+                    int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
                                                             2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
                                                             0 /*default cTries*/);
                     if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
-                        pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
+                        pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
                     else
-                        pVCpu->vmm.s.iLastGZRc = rc;
+                        pGVCpu->vmm.s.iLastGZRc = rc;
                 }
                 else
-                    pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
+                    pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
             }
             break;
…
             /*
              * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
              */
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
-            int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
-            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
-
-            pVCpu->vmm.s.iLastGZRc = rc;
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
+# ifdef VBOXSTRICTRC_STRICT_ENABLED
+            int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
+# else
+            int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
+# endif
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
+            STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
+
+            pGVCpu->vmm.s.iLastGZRc = rc;

             /*
              * Fire dtrace probe and collect statistics.
              */
-            VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
+            VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
 # ifdef VBOX_WITH_STATISTICS
-            vmmR0RecordRC(pVM, pVCpu, rc);
+            vmmR0RecordRC(pGVM, pGVCpu, rc);
 # endif
             break;
…
         /*
          */
         case VMMR0_DO_NOP:
-            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
+            pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
             break;

…
         default:
             AssertMsgFailed(("%#x\n", enmOperation));
-            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
+            pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
             break;
     }
-    VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
 }
…
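[Editor's note: the new VBOXSTRICTRC_STRICT_ENABLED branch above exists because, in strict builds, VBOXSTRICTRC is a wrapper type rather than a plain int, so the NEM run function no longer matches the plain-int callback signature and gets cast. A minimal stand-in for the underlying idea; the real code casts because the wrapper is binary-compatible, while this sketch uses an explicit adapter and invented names.]

    #include <stdio.h>

    typedef struct { int rc; } StrictRc;          /* strict wrapper around the status code */
    typedef int (*PFNWORKER)(unsigned idCpu);     /* the callback type the dispatcher expects */

    static StrictRc strictWorker(unsigned idCpu) { StrictRc r = { (int)idCpu + 100 }; return r; }

    /* adapter: unwraps the strict type so the plain callback signature still fits */
    static int workerAdapter(unsigned idCpu) { return strictWorker(idCpu).rc; }

    int main(void)
    {
        PFNWORKER pfn = workerAdapter;
        printf("rc=%d\n", pfn(7));
        return 0;
    }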
- * @param   pVM              The cross context VM structure.
+ * @param   pGVM             The global (ring-0) VM structure.
  * @param   pClaimedSession  The session claim to validate.
  * @param   pSession         The session argument.
  */
-DECLINLINE(bool) vmmR0IsValidSession(PVMCC pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
+DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
 {
     /* This must be set! */
…

     /* Only one out of the two. */
-    if (pVM && pClaimedSession)
+    if (pGVM && pClaimedSession)
         return false;
-    if (pVM)
-        pClaimedSession = pVM->pSession;
+    if (pGVM)
+        pClaimedSession = pGVM->pSession;
     return pClaimedSession == pSession;
 }
…
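[Editor's note: a standalone model of the session check above: at most one of pGVM / pClaimedSession may carry the claim, and whichever one does must match the caller's session. Types are simplified stand-ins.]

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Session Session;
    typedef struct { Session *pSession; } GVM;

    static bool isValidSession(GVM *pGVM, Session *pClaimed, Session *pSession)
    {
        if (!pSession)                /* the caller's session must be set */
            return false;
        if (pGVM && pClaimed)         /* only one of the two may claim */
            return false;
        if (pGVM)
            pClaimed = pGVM->pSession;
        return pClaimed == pSession;
    }

    int main(void)
    {
        Session s;
        GVM vm = { &s };
        printf("%d\n", isValidSession(&vm, NULL, &s));   /* 1: VM claims, matches */
        printf("%d\n", isValidSession(&vm, &s, &s));     /* 0: double claim */
        return 0;
    }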
 *
 * @returns VBox status code.
 * @param   pGVM            The global (ring-0) VM structure.
-* @param   pVM             The cross context VM structure.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
…
 * @remarks Assume called with interrupts _enabled_.
 */
-static int vmmR0EntryExWorker(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
+static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
                               PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
 {
     /*
-     * Validate pGVM, pVM and idCpu for consistency and validity.
-     */
-    if (   pGVM != NULL
-        || pVM  != NULL)
-    {
-        if (RT_LIKELY(   RT_VALID_PTR(pGVM)
-                      && RT_VALID_PTR(pVM)
-                      && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
+     * Validate pGVM and idCpu for consistency and validity.
+     */
+    if (pGVM != NULL)
+    {
+        if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
         { /* likely */ }
         else
         {
-            SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
+            SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
             return VERR_INVALID_POINTER;
-        }
-
-        if (RT_LIKELY(pGVM == pVM))
-        { /* likely */ }
-        else
-        {
-            SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM/pVM=%p\n", pVM, pGVM);
-            return VERR_INVALID_PARAMETER;
         }

…
         }

-        if (RT_LIKELY(   pVM->enmVMState >= VMSTATE_CREATING
-                      && pVM->enmVMState <= VMSTATE_TERMINATED
-                      && pVM->cCpus    == pGVM->cCpus
-                      && pVM->pSession == pSession
-                      && pVM->pSelf    == pVM))
+        if (RT_LIKELY(   pGVM->enmVMState >= VMSTATE_CREATING
+                      && pGVM->enmVMState <= VMSTATE_TERMINATED
+                      && pGVM->pSession == pSession
+                      && pGVM->pSelf    == pGVM))
         { /* likely */ }
         else
         {
-            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
-                        pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pSelf, pVM, enmOperation);
+            SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
+                        pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
             return VERR_INVALID_POINTER;
         }
…
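[Editor's note: a compact user-mode model of the self-pointer / state / session sanity checks above: a handle proves itself by being page aligned, pointing back at itself, and matching the caller's session. The constants and struct are invented; the aligned attribute is GCC/Clang specific.]

    #include <stdint.h>
    #include <stdio.h>

    enum { STATE_CREATING = 1, STATE_TERMINATED = 9 };

    typedef struct GVM { struct GVM *pSelf; int enmState; void *pSession; } GVM;

    static int validate(GVM *pGVM, void *pSession)
    {
        if (((uintptr_t)pGVM & 0xfff) != 0)      /* must be page aligned */
            return -1;
        if (   pGVM->enmState < STATE_CREATING
            || pGVM->enmState > STATE_TERMINATED
            || pGVM->pSession != pSession
            || pGVM->pSelf    != pGVM)           /* self pointer must point back */
            return -2;
        return 0;
    }

    int main(void)
    {
        static GVM vm __attribute__((aligned(4096)));
        int session;
        vm.pSelf = &vm; vm.enmState = STATE_CREATING; vm.pSession = &session;
        printf("rc=%d\n", validate(&vm, &session));   /* 0 */
        return 0;
    }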
…
         case VMMR0_DO_GVMM_CREATE_VM:
-            if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
+            if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
                 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
             else
…
         case VMMR0_DO_GVMM_DESTROY_VM:
             if (pReqHdr == NULL && u64Arg == 0)
-                rc = GVMMR0DestroyVM(pGVM, pVM);
+                rc = GVMMR0DestroyVM(pGVM);
             else
                 rc = VERR_INVALID_PARAMETER;
…
         case VMMR0_DO_GVMM_REGISTER_VMCPU:
-            if (pGVM != NULL && pVM != NULL)
-                rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
+            if (pGVM != NULL)
+                rc = GVMMR0RegisterVCpu(pGVM, idCpu);
             else
                 rc = VERR_INVALID_PARAMETER;
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
-            if (pGVM != NULL && pVM != NULL)
-                rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
+            if (pGVM != NULL)
+                rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
             else
                 rc = VERR_INVALID_PARAMETER;
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GVMM_SCHED_HALT:
             if (pReqHdr)
                 return VERR_INVALID_PARAMETER;
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
-            rc = GVMMR0SchedHaltReq(pGVM, pVM, idCpu, u64Arg);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
+            rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GVMM_SCHED_WAKE_UP:
             if (pReqHdr || u64Arg)
                 return VERR_INVALID_PARAMETER;
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
-            rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
+            rc = GVMMR0SchedWakeUp(pGVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GVMM_SCHED_POKE:
             if (pReqHdr || u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GVMMR0SchedPoke(pGVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GVMM_SCHED_POLL:
             if (pReqHdr || u64Arg > 1)
                 return VERR_INVALID_PARAMETER;
-            rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GVMM_QUERY_STATISTICS:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GVMM_RESET_STATISTICS:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

…
         case VMMR0_DO_VMMR0_INIT:
-            rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

…
         case VMMR0_DO_VMMR0_INIT_EMT:
-            rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = vmmR0InitVMEmt(pGVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

…
         case VMMR0_DO_VMMR0_TERM:
-            rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

…
         case VMMR0_DO_HM_ENABLE:
-            rc = HMR0EnableAllCpus(pVM);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = HMR0EnableAllCpus(pGVM);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

…
         case VMMR0_DO_HM_SETUP_VM:
-            rc = HMR0SetupVM(pVM);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = HMR0SetupVM(pGVM);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;
…
         case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
             if (idCpu == NIL_VMCPUID)
                 return VERR_INVALID_CPU_ID;
-            rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
             if (idCpu == NIL_VMCPUID)
                 return VERR_INVALID_CPU_ID;
-            rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
             if (idCpu == NIL_VMCPUID)
                 return VERR_INVALID_CPU_ID;
-            rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
             if (idCpu != 0)
                 return VERR_INVALID_CPU_ID;
-            rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = PGMR0PhysSetupIoMmu(pGVM);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

…
         case VMMR0_DO_GMM_INITIAL_RESERVATION:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_UPDATE_RESERVATION:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_ALLOCATE_PAGES:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_FREE_PAGES:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_FREE_LARGE_PAGE:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
             rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

…
         case VMMR0_DO_GMM_QUERY_MEM_STATS:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_BALLOONED_PAGES:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;
         case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_SEED_CHUNK:
             if (pReqHdr)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
…
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
…
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_RESET_SHARED_MODULES:
…
             if (    u64Arg
                 ||  pReqHdr)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0ResetSharedModules(pGVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
         {
…
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0CheckSharedModules(pGVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;
         }

…
         case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;
 #endif

         case VMMR0_DO_GMM_QUERY_STATISTICS:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_GMM_RESET_STATISTICS:
             if (u64Arg)
                 return VERR_INVALID_PARAMETER;
-            rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

…
         case VMMR0_DO_GCFGM_SET_VALUE:
         case VMMR0_DO_GCFGM_QUERY_VALUE:
         {
-            if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
+            if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
             PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
…
                 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
             }
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;
         }

…
         case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
         {
             if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
-            rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;
         }

         case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
         {
             if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
-            rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            rc = PDMR0DeviceCallReqHandler(pGVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;
         }

…
         case VMMR0_DO_INTNET_OPEN:
         {
             PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
-            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
+            if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
             rc = IntNetR0OpenReq(pSession, pReq);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;
         }

         case VMMR0_DO_INTNET_IF_CLOSE:
-            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
+            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
             rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
-            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
+            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
             rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
-            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
+            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
             rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
-            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
+            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
             rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_INTNET_IF_SET_ACTIVE:
-            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
+            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
             rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_INTNET_IF_SEND:
-            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
+            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
             rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_INTNET_IF_WAIT:
-            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
+            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                 return VERR_INVALID_PARAMETER;
             rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
-            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;

         case VMMR0_DO_INTNET_IF_ABORT_WAIT:
-            if (u64Arg || !pReqHdr ||
!vmmR0IsValidSession(p VM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2060 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2074 2061 return VERR_INVALID_PARAMETER; 2075 2062 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr); 2076 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2063 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2077 2064 break; 2078 2065 … … 2082 2069 */ 2083 2070 case VMMR0_DO_PCIRAW_REQ: 2084 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2085 return VERR_INVALID_PARAMETER; 2086 rc = PciRawR0ProcessReq(pGVM, p VM, pSession, (PPCIRAWSENDREQ)pReqHdr);2087 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2071 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2072 return VERR_INVALID_PARAMETER; 2073 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr); 2074 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2088 2075 break; 2089 2076 #endif … … 2097 2084 if (u64Arg || pReqHdr || idCpu != 0) 2098 2085 return VERR_INVALID_PARAMETER; 2099 rc = NEMR0InitVM(pGVM , pVM);2100 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2086 rc = NEMR0InitVM(pGVM); 2087 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2101 2088 break; 2102 2089 … … 2104 2091 if (u64Arg || pReqHdr || idCpu != 0) 2105 2092 return VERR_INVALID_PARAMETER; 2106 rc = NEMR0InitVMPart2(pGVM , pVM);2107 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2093 rc = NEMR0InitVMPart2(pGVM); 2094 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2108 2095 break; 2109 2096 … … 2111 2098 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID) 2112 2099 return VERR_INVALID_PARAMETER; 2113 rc = NEMR0MapPages(pGVM, pVM,idCpu);2114 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2100 rc = NEMR0MapPages(pGVM, idCpu); 2101 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2115 2102 break; 2116 2103 … … 2118 2105 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID) 2119 2106 return VERR_INVALID_PARAMETER; 2120 rc = NEMR0UnmapPages(pGVM, pVM,idCpu);2121 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2107 rc = NEMR0UnmapPages(pGVM, idCpu); 2108 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2122 2109 break; 2123 2110 … … 2125 2112 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID) 2126 2113 return VERR_INVALID_PARAMETER; 2127 rc = NEMR0ExportState(pGVM, pVM,idCpu);2128 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2114 rc = NEMR0ExportState(pGVM, idCpu); 2115 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2129 2116 break; 2130 2117 … … 2132 2119 if (pReqHdr || idCpu == NIL_VMCPUID) 2133 2120 return VERR_INVALID_PARAMETER; 2134 rc = NEMR0ImportState(pGVM, pVM,idCpu, u64Arg);2135 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2121 rc = NEMR0ImportState(pGVM, idCpu, u64Arg); 2122 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2136 2123 break; 2137 2124 … … 2139 2126 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID) 2140 2127 return VERR_INVALID_PARAMETER; 2141 rc = NEMR0QueryCpuTick(pGVM, pVM,idCpu);2142 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2128 rc = NEMR0QueryCpuTick(pGVM, idCpu); 2129 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2143 2130 break; 2144 2131 … … 2146 2133 if (pReqHdr || idCpu == NIL_VMCPUID) 2147 2134 return VERR_INVALID_PARAMETER; 2148 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM,idCpu, u64Arg);2149 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2135 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg); 2136 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2150 2137 break; 2151 2138 … … 2153 2140 if 
(u64Arg || pReqHdr) 2154 2141 return VERR_INVALID_PARAMETER; 2155 rc = NEMR0UpdateStatistics(pGVM, pVM,idCpu);2156 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2142 rc = NEMR0UpdateStatistics(pGVM, idCpu); 2143 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2157 2144 break; 2158 2145 … … 2161 2148 if (pReqHdr) 2162 2149 return VERR_INVALID_PARAMETER; 2163 rc = NEMR0DoExperiment(pGVM, pVM,idCpu, u64Arg);2164 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2150 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg); 2151 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2165 2152 break; 2166 2153 # endif … … 2200 2187 { 2201 2188 PGVM pGVM; 2202 PVMCC pVM;2203 2189 VMCPUID idCpu; 2204 2190 VMMR0OPERATION enmOperation; … … 2219 2205 { 2220 2206 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM, 2221 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,2222 2207 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu, 2223 2208 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation, … … 2251 2236 if ( pVM != NULL 2252 2237 && pGVM != NULL 2238 && pVM == pGVM /** @todo drop pGVM */ 2253 2239 && idCpu < pGVM->cCpus 2254 && p VM->pSession == pSession2255 && p VM->pSelf != NULL)2240 && pGVM->pSession == pSession 2241 && pGVM->pSelf == pVM) 2256 2242 { 2257 2243 switch (enmOperation) … … 2279 2265 VMMR0ENTRYEXARGS Args; 2280 2266 Args.pGVM = pGVM; 2281 Args.pVM = pVM;2282 2267 Args.idCpu = idCpu; 2283 2268 Args.enmOperation = enmOperation; … … 2294 2279 } 2295 2280 } 2296 return vmmR0EntryExWorker(pGVM, pVM,idCpu, enmOperation, pReq, u64Arg, pSession);2281 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession); 2297 2282 } 2298 2283 -
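All of the VMMR0.cpp hunks above are the same mechanical change: with the ring-0 GVM structure and the shared VM structure now occupying a single allocation, the separate PVMCC parameter is redundant, so every request handler, session-validation call, and SMAP check switches to pGVM alone. Below is a minimal, self-contained sketch of the idea; the toy structs and the function vmmIsValidHandle are hypothetical stand-ins (the real GVM/VMCC types carry far more state), illustrating only the aliasing and the consolidated validation mirrored by the pGVM->pSelf == pVM test in the diff.

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for VBox's VM/GVM structures (hypothetical layout). */
    typedef struct VM
    {
        void      *pSession;   /* owning support-driver session */
        struct VM *pSelf;      /* self pointer used for validation */
        uint32_t   cCpus;
    } VM;

    /* After the merge, the ring-0 view begins with the shared data, so a
     * pointer to GVM and a pointer to VM can address the same allocation. */
    typedef struct GVM
    {
        VM vm;                 /* shared part first => (VM *)pGVM == pVM */
    } GVM;

    /* Sketch of the consolidated entry-point validation: one handle,
     * one set of checks. */
    static int vmmIsValidHandle(GVM *pGVM, VM *pVM, void *pSession, uint32_t idCpu)
    {
        return pVM != NULL
            && pGVM != NULL
            && pVM == &pGVM->vm            /* single allocation now */
            && idCpu < pGVM->vm.cCpus
            && pGVM->vm.pSession == pSession
            && pGVM->vm.pSelf == pVM;
    }

    int main(void)
    {
        GVM gvm;
        gvm.vm.pSession = &gvm;            /* pretend session handle */
        gvm.vm.pSelf    = &gvm.vm;
        gvm.vm.cCpus    = 1;
        printf("valid=%d\n", vmmIsValidHandle(&gvm, &gvm.vm, &gvm, 0));
        return 0;
    }

Once every handler takes only pGVM, the /** @todo drop pGVM */ note in the validation hunk points at the remaining cleanup: removing one of the two now-identical parameters from the entry point itself.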
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
r80333 r80346
@@ -1724 +1724 @@
     }
 #endif
-    return nemHCWinRunGC(pVM, pVCpu, NULL /*pGVM*/, NULL /*pGVCpu*/);
+    return nemHCWinRunGC(pVM, pVCpu);
 }
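The NEMR3Native-win.cpp hunk is the ring-3 face of the same cleanup: the shared run-loop template no longer needs NULL placeholders for ring-0-only pointers threaded through from ring 3. A toy illustration of why the two trailing parameters could simply vanish; runGC, runGCOld, and the structs here are hypothetical stand-ins, not VBox APIs.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct VM    { int id; } VM;
    typedef struct VMCPU { VM *pVM; int idCpu; } VMCPU;

    /* Old shape (sketch): ring-3 callers had to pass NULLs for the
     * ring-0-only views the shared template might use. */
    static int runGCOld(VM *pVM, VMCPU *pVCpu, void *pGVM, void *pGVCpu)
    {
        (void)pGVM; (void)pGVCpu;          /* always NULL from ring 3 */
        return pVM->id + pVCpu->idCpu;
    }

    /* New shape: with the VM/GVM merge the extra parameters carry no
     * information, so they are dropped outright. */
    static int runGC(VM *pVM, VMCPU *pVCpu)
    {
        return pVM->id + pVCpu->idCpu;
    }

    int main(void)
    {
        VM    vm   = { 80346 };
        VMCPU vcpu = { &vm, 0 };
        printf("old=%d new=%d\n", runGCOld(&vm, &vcpu, NULL, NULL), runGC(&vm, &vcpu));
        return 0;
    }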