VirtualBox

Changeset 80346 in vbox for trunk/src/VBox/VMM


Timestamp:
Aug 19, 2019 7:36:29 PM
Author:
vboxsync
Message:

VMM,PciRaw: Eliminate duplicate PGVM/PVMCC and PGVMCPU/PVMCPUCC parameters in ring-0 code. bugref:9217
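
The change follows one pattern throughout: in ring-0 builds the cross context types PVMCC/PVMCPUCC now resolve to the ring-0 PGVM/PGVMCPU structures, so functions that used to carry both pointers keep only one and reach the other via pVCpu->pGVM or pGVM itself. A minimal before/after sketch of that pattern, assuming the VMM-internal headers; nemR0DoSomethingOld/New and nemR0WinWorker are illustrative names, not declarations from this changeset:

    #ifdef IN_RING0
    /* r80334 style: the caller fetched the ring-0 twin and passed both views of the same VM. */
    VBOXSTRICTRC nemR0DoSomethingOld(PVMCC pVM, PVMCPUCC pVCpu)
    {
        PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
        AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
        return nemR0WinWorker(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu], pVCpu); /* hypothetical worker */
    }

    /* r80346 style: PVMCC/PVMCPUCC are the ring-0 structures, so the duplicate parameters are dropped. */
    VBOXSTRICTRC nemR0DoSomethingNew(PVMCC pVM, PVMCPUCC pVCpu)
    {
        return nemR0WinWorker(pVM, pVCpu); /* hypothetical worker */
    }
    #endif /* IN_RING0 */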

Location:
trunk/src/VBox/VMM
Files:
9 edited

  • trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h

    r80334 r80346  
    3636# define NEMWIN_NEED_GET_REGISTER
    3737# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
    38 #  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
     38#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
    3939    do { \
    4040        HV_REGISTER_VALUE TmpVal; \
    41         nemHCWinGetRegister(a_pVCpu, a_pGVCpu, a_enmReg, &TmpVal); \
     41        nemHCWinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
    4242        AssertMsg(a_Expr, a_Msg); \
    4343    } while (0)
    4444# else
    45 #  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
     45#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
    4646    do { \
    4747        WHV_REGISTER_VALUE TmpVal; \
     
    5151# endif
    5252#else
    53 # define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
     53# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
    5454#endif
    5555
     
    5757 * Asserts the correctness of a 64-bit register value in a message/context.
    5858 */
    59 #define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_pGVCpu, a_enmReg, a_u64Val) \
    60     NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
     59#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_enmReg, a_u64Val) \
     60    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
    6161                              (#a_u64Val "=%#RX64, expected %#RX64\n", (a_u64Val), TmpVal.Reg64))
    6262/** @def NEMWIN_ASSERT_MSG_REG_VAL
    6363 * Asserts the correctness of a segment register value in a message/context.
    6464 */
    65 #define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_pGVCpu, a_enmReg, a_SReg) \
    66     NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, \
     65#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_enmReg, a_SReg) \
     66    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, \
    6767                                 (a_SReg).Base       == TmpVal.Segment.Base \
    6868                              && (a_SReg).Limit      == TmpVal.Segment.Limit \
     
    109109#ifdef IN_RING0
    110110    /** @todo optimize further, caller generally has the physical address. */
    111     PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    112     AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    113     return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu],
     111    return nemR0WinMapPages(pVM, pVCpu,
    114112                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
    115113                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
     
    136134{
    137135# ifdef IN_RING0
    138     PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    139     AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    140     return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
     136    return nemR0WinUnmapPages(pVM, pVCpu, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
    141137# else
    142138    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys    = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
     
    398394        ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
    399395#if 0 /** @todo these registers aren't available? Might explain something.. .*/
    400         const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
     396        const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
    401397        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
    402398        {
     
    665661
    666662//#ifdef LOG_ENABLED
    667 //    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
     663//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
    668664//#endif
    669665    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
     
    12911287    AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
    12921288
    1293     pInput->PartitionId = pGVCpu->pGVM->nemr0.s.idHvPartition;
    1294     pInput->VpIndex     = pGVCpu->idCpu;
     1289    pInput->PartitionId = pVCpu->pGVM->nemr0.s.idHvPartition;
     1290    pInput->VpIndex     = pVCpu->idCpu;
    12951291    pInput->fFlags      = 0;
    12961292    pInput->Names[0]    = (HV_REGISTER_NAME)enmReg;
     
    18231819 * @param   pGVM            The global (ring-0) VM structure.
    18241820 * @param   pGVCpu          The global (ring-0) per CPU structure.
     1821 * @param   fWhat           What to import.
     1822 * @param   pszCaller       Who is doing the importing.
     1823 */
     1824DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
     1825{
     1826    int rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
     1827    if (RT_SUCCESS(rc))
     1828    {
     1829        Assert(rc == VINF_SUCCESS);
     1830        return VINF_SUCCESS;
     1831    }
     1832
     1833    if (rc == VERR_NEM_FLUSH_TLB)
     1834    {
     1835        Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
     1836        return -rc;
     1837    }
     1838    RT_NOREF(pszCaller);
     1839    AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
     1840}
     1841#endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/
     1842
     1843#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
     1844/**
     1845 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
     1846 *
     1847 * Unlike the wrapped APIs, this checks whether it's necessary.
     1848 *
     1849 * @returns VBox strict status code.
    18251850 * @param   pVCpu           The cross context per CPU structure.
    18261851 * @param   fWhat           What to import.
    18271852 * @param   pszCaller       Who is doing the importing.
    18281853 */
    1829 DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu, uint64_t fWhat, const char *pszCaller)
    1830 {
    1831     int rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
    1832     if (RT_SUCCESS(rc))
    1833     {
    1834         Assert(rc == VINF_SUCCESS);
    1835         return VINF_SUCCESS;
    1836     }
    1837 
    1838     if (rc == VERR_NEM_FLUSH_TLB)
    1839     {
    1840         Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
    1841         return -rc;
    1842     }
    1843     RT_NOREF(pszCaller);
    1844     AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
    1845 }
    1846 #endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/
    1847 
    1848 #if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
    1849 /**
    1850  * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
    1851  *
    1852  * Unlike the wrapped APIs, this checks whether it's necessary.
    1853  *
    1854  * @returns VBox strict status code.
    1855  * @param   pGVM            The global (ring-0) VM structure.
    1856  * @param   pGVCpu          The global (ring-0) per CPU structure.
    1857  * @param   fWhat           What to import.
    1858  * @param   pszCaller       Who is doing the importing.
    1859  */
    1860 DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPUCC pVCpu, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
     1854DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPUCC pVCpu, uint64_t fWhat, const char *pszCaller)
    18611855{
    18621856    if (pVCpu->cpum.GstCtx.fExtrn & fWhat)
    18631857    {
    18641858# ifdef IN_RING0
    1865         return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, fWhat, pszCaller);
     1859        return nemR0WinImportStateStrict(pVCpu->pGVM, pVCpu, fWhat, pszCaller);
    18661860# else
    1867         RT_NOREF(pGVCpu, pszCaller);
     1861        RT_NOREF(pszCaller);
    18681862        int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
    18691863        AssertRCReturn(rc, rc);
     
    19471941 * @param   pVCpu           The cross context per CPU structure.
    19481942 * @param   pMsg            The message.
    1949  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
    19501943 * @sa      nemR3WinHandleExitMemory
    19511944 */
    19521945NEM_TMPL_STATIC VBOXSTRICTRC
    1953 nemHCWinHandleMessageMemory(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
     1946nemHCWinHandleMessageMemory(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg)
    19541947{
    19551948    uint64_t const uHostTsc = ASMReadTSC();
     
    20302023    VBOXSTRICTRC rcStrict;
    20312024# ifdef IN_RING0
    2032     rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu,
     2025    rcStrict = nemR0WinImportStateStrict(pVM, pVCpu,
    20332026                                         NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
    20342027    if (rcStrict != VINF_SUCCESS)
     
    20372030    rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
    20382031    AssertRCReturn(rc, rc);
    2039     NOREF(pGVCpu);
    20402032# endif
    20412033
     
    21722164 * @param   pVCpu           The cross context per CPU structure.
    21732165 * @param   pMsg            The message.
    2174  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
    21752166 */
    21762167NEM_TMPL_STATIC VBOXSTRICTRC
    2177 nemHCWinHandleMessageIoPort(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
     2168nemHCWinHandleMessageIoPort(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg)
    21782169{
    21792170    /*
     
    21852176    Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
    21862177           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
    2187     NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    2188     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    2189     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    2190     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    2191     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
     2178    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
     2179    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
     2180    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
     2181    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
     2182    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
    21922183    if (pMsg->AccessInfo.StringOp)
    21932184    {
    2194         NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterDs,  pMsg->DsSegment);
    2195         NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterEs,  pMsg->EsSegment);
    2196         NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
    2197         NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
    2198         NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
     2185        NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterDs,  pMsg->DsSegment);
     2186        NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterEs,  pMsg->EsSegment);
     2187        NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
     2188        NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
     2189        NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
    21992190    }
    22002191
     
    23082299            pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
    23092300# ifdef IN_RING0
    2310             rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
     2301            rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
    23112302            if (rcStrict != VINF_SUCCESS)
    23122303                return rcStrict;
     
    23142305            int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
    23152306            AssertRCReturn(rc, rc);
    2316             RT_NOREF(pGVCpu);
    23172307# endif
    23182308
     
    23592349
    23602350# ifdef IN_RING0
    2361     rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
     2351    rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
    23622352    if (rcStrict != VINF_SUCCESS)
    23632353        return rcStrict;
     
    23652355    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
    23662356    AssertRCReturn(rc, rc);
    2367     RT_NOREF(pGVCpu);
    23682357# endif
    23692358
     
    25542543 * @param   pVCpu           The cross context per CPU structure.
    25552544 * @param   pMsg            The message.
    2556  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
    25572545 * @sa      nemR3WinHandleExitInterruptWindow
    25582546 */
    25592547NEM_TMPL_STATIC VBOXSTRICTRC
    2560 nemHCWinHandleMessageInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg, PGVMCPU pGVCpu)
     2548nemHCWinHandleMessageInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg)
    25612549{
    25622550    /*
     
    25802568
    25812569    /** @todo call nemHCWinHandleInterruptFF   */
    2582     RT_NOREF(pVM, pGVCpu);
     2570    RT_NOREF(pVM);
    25832571    return VINF_SUCCESS;
    25842572}
     
    26292617 * @param   pVCpu           The cross context per CPU structure.
    26302618 * @param   pMsg            The message.
    2631  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
    26322619 * @sa      nemR3WinHandleExitCpuId
    26332620 */
    2634 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg,
    2635                                                         PGVMCPU pGVCpu)
     2621NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg)
    26362622{
    26372623    /* Check message register value sanity. */
    2638     NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    2639     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    2640     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    2641     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    2642     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
    2643     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
    2644     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
    2645     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);
     2624    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
     2625    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
     2626    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
     2627    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
     2628    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
     2629    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
     2630    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
     2631    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
    26462632
    26472633    /* Do exit history. */
     
    26982684          pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
    26992685# ifdef IN_RING0
    2700     VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
     2686    VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
    27012687    if (rcStrict != VINF_SUCCESS)
    27022688        return rcStrict;
     
    27052691    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
    27062692    AssertRCReturn(rc, rc);
    2707     RT_NOREF(pGVCpu);
    27082693# endif
    27092694    VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
     
    27962781 * @param   pVCpu           The cross context per CPU structure.
    27972782 * @param   pMsg            The message.
    2798  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
    27992783 * @sa      nemR3WinHandleExitMsr
    28002784 */
    2801 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPUCC pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
     2785NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPUCC pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg)
    28022786{
    28032787    /*
     
    28062790    Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
    28072791           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
    2808     NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    2809     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    2810     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    2811     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    2812     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
    2813     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
     2792    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
     2793    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
     2794    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
     2795    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
     2796    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
     2797    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
    28142798
    28152799    /*
     
    28302814
    28312815        nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
    2832         rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
     2816        rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
    28332817                                                     (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
    28342818                                                     | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
     
    29302914     * If we get down here, we're supposed to #GP(0).
    29312915     */
    2932     rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
     2916    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
    29332917    if (rcStrict == VINF_SUCCESS)
    29342918    {
     
    29692953                                                pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
    29702954        nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    2971         rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
     2955        rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
    29722956                                                     (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
    29732957                                                     | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
     
    30563040     * If we get down here, we're supposed to #GP(0).
    30573041     */
    3058     rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
    3059                                                  NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
     3042    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
    30603043    if (rcStrict == VINF_SUCCESS)
    30613044    {
     
    31953178 * @param   pVCpu           The cross context per CPU structure.
    31963179 * @param   pMsg            The message.
    3197  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
    31983180 * @sa      nemR3WinHandleExitMsr
    31993181 */
    32003182NEM_TMPL_STATIC VBOXSTRICTRC
    3201 nemHCWinHandleMessageException(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
     3183nemHCWinHandleMessageException(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg)
    32023184{
    32033185    /*
     
    32073189           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
    32083190           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
    3209     NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    3210     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    3211     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    3212     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    3213     NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterDs,  pMsg->DsSegment);
    3214     NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterSs,  pMsg->SsSegment);
    3215     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
    3216     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
    3217     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
    3218     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);
    3219     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsp, pMsg->Rsp);
    3220     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbp, pMsg->Rbp);
    3221     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
    3222     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
    3223     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR8,  pMsg->R8);
    3224     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR9,  pMsg->R9);
    3225     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR10, pMsg->R10);
    3226     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR11, pMsg->R11);
    3227     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR12, pMsg->R12);
    3228     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR13, pMsg->R13);
    3229     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR14, pMsg->R14);
    3230     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR15, pMsg->R15);
     3191    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
     3192    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
     3193    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
     3194    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
     3195    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterDs,  pMsg->DsSegment);
     3196    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterSs,  pMsg->SsSegment);
     3197    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
     3198    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
     3199    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
     3200    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
     3201    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsp, pMsg->Rsp);
     3202    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbp, pMsg->Rbp);
     3203    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
     3204    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
     3205    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR8,  pMsg->R8);
     3206    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR9,  pMsg->R9);
     3207    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR10, pMsg->R10);
     3208    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR11, pMsg->R11);
     3209    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR12, pMsg->R12);
     3210    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR13, pMsg->R13);
     3211    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR14, pMsg->R14);
     3212    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR15, pMsg->R15);
    32313213
    32323214    /*
     
    32433225    if (pMsg->ExceptionVector == X86_XCPT_DB)
    32443226        fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
    3245     VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, fWhat, "Xcpt");
     3227    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
    32463228    if (rcStrict != VINF_SUCCESS)
    32473229        return rcStrict;
     
    33413323    if (pExit->VpException.ExceptionType == X86_XCPT_DB)
    33423324        fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
    3343     VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, fWhat, "Xcpt");
     3325    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
    33443326    if (rcStrict != VINF_SUCCESS)
    33453327        return rcStrict;
     
    34313413 * @param   pVCpu           The cross context per CPU structure.
    34323414 * @param   pMsgHdr         The message header.
    3433  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
    34343415 * @sa      nemR3WinHandleExitUnrecoverableException
    34353416 */
    34363417NEM_TMPL_STATIC VBOXSTRICTRC
    3437 nemHCWinHandleMessageUnrecoverableException(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, PGVMCPU pGVCpu)
     3418nemHCWinHandleMessageUnrecoverableException(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
    34383419{
    34393420    /* Check message register value sanity. */
    3440     NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
    3441     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsgHdr->Rip);
    3442     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
    3443     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);
     3421    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
     3422    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsgHdr->Rip);
     3423    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
     3424    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);
    34443425
    34453426# if 0
     
    34583439                     pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
    34593440    nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
    3460     VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
    3461                                                               NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
     3441    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
    34623442    if (rcStrict == VINF_SUCCESS)
    34633443    {
     
    35113491                     pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
    35123492    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    3513     VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
    3514                                                               NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
     3493    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
    35153494    if (rcStrict == VINF_SUCCESS)
    35163495    {
     
    35493528 * @param   pVCpu           The cross context per CPU structure.
    35503529 * @param   pMappingHeader  The message slot mapping.
    3551  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
    35523530 * @sa      nemR3WinHandleExit
    35533531 */
    35543532NEM_TMPL_STATIC VBOXSTRICTRC
    3555 nemHCWinHandleMessage(PVMCC pVM, PVMCPUCC pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader, PGVMCPU pGVCpu)
     3533nemHCWinHandleMessage(PVMCC pVM, PVMCPUCC pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
    35563534{
    35573535    if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
     
    35643542                Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
    35653543                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
    3566                 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pGVCpu);
     3544                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);
    35673545
    35683546            case HvMessageTypeGpaIntercept:
    35693547                Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
    35703548                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
    3571                 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pGVCpu);
     3549                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);
    35723550
    35733551            case HvMessageTypeX64IoPortIntercept:
    35743552                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
    35753553                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
    3576                 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pGVCpu);
     3554                return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept);
    35773555
    35783556            case HvMessageTypeX64Halt:
     
    35863564                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
    35873565                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
    3588                 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pGVCpu);
     3566                return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow);
    35893567
    35903568            case HvMessageTypeX64CpuidIntercept:
    35913569                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
    35923570                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
    3593                 return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept, pGVCpu);
     3571                return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept);
    35943572
    35953573            case HvMessageTypeX64MsrIntercept:
    35963574                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
    35973575                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
    3598                 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept, pGVCpu);
     3576                return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept);
    35993577
    36003578            case HvMessageTypeX64ExceptionIntercept:
    36013579                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
    36023580                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
    3603                 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept, pGVCpu);
     3581                return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept);
    36043582
    36053583            case HvMessageTypeUnrecoverableException:
    36063584                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
    36073585                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
    3608                 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pGVCpu);
     3586                return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader);
    36093587
    36103588            case HvMessageTypeInvalidVpRegisterValue:
     
    37123690 * @returns NT status code.
    37133691 * @param   pGVM            The ring-0 VM structure.
    3714  * @param   pGVCpu          The ring-0 CPU structure.
    3715  * @param   pVCpu           The calling cross context CPU structure.
     3692 * @param   pGVCpu          The global (ring-0) per CPU structure.
    37163693 * @param   fFlags          The wait flags.
    37173694 * @param   cMillies        The timeout in milliseconds
    37183695 */
    3719 static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu,
    3720                                                                uint32_t fFlags, uint32_t cMillies)
    3721 {
    3722     pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
    3723     pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags;
    3724     pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
    3725     NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
    3726                                             &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
     3696static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, uint32_t fFlags, uint32_t cMillies)
     3697{
     3698    pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
     3699    pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags;
     3700    pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
     3701    NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
     3702                                            &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
    37273703                                            pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
    37283704                                            NULL, 0);
     
    37403716             || rcNt == STATUS_USER_APC   /* just in case */)
    37413717    {
    3742         DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
    3743         STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingAlerts);
     3718        DBGFTRACE_CUSTOM(pGVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
     3719        STAM_REL_COUNTER_INC(&pGVCpu->nem.s.StatStopCpuPendingAlerts);
    37443720        Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);
    37453721
    3746         pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pVCpu->idCpu;
    3747         pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
    3748         pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
    3749         rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
    3750                                        &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
     3722        pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
     3723        pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
     3724        pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
     3725        rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
     3726                                       &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
    37513727                                       pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
    37523728                                       NULL, 0);
    3753         DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
     3729        DBGFTRACE_CUSTOM(pGVM, "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
    37543730    }
    37553731    return rcNt;
    37563732}
    3757 
    37583733#endif /* IN_RING0 */
    37593734
     
    37743749 *                          exit.
    37753750 * @param   pMappingHeader  The message slot mapping.
    3776  * @param   pGVM            The global (ring-0) VM structure (NULL in r3).
    3777  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
    37783751 */
    37793752NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVMCC pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict,
    3780                                              VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
    3781                                              PGVM pGVM, PGVMCPU pGVCpu)
     3753                                             VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
    37823754{
    37833755# ifdef DBGFTRACE_ENABLED
     
    37913763    DBGFTRACE_CUSTOM(pVM, "nemStop#0");
    37923764# ifdef IN_RING0
    3793     pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
    3794     NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction,
     3765    pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
     3766    NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction,
    37953767                                            &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
    37963768                                            NULL, 0);
     
    38113783        return rcStrict;
    38123784    }
    3813     RT_NOREF(pGVM, pGVCpu);
    38143785# endif
    38153786
     
    38353806     */
    38363807# ifdef IN_RING0
    3837     rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
     3808    rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
    38383809    DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
    38393810                     pMsgForTrace->Header.MessageType);
     
    38533824    if (enmVidMsgType != VidMessageStopRequestComplete)
    38543825    {
    3855         VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pGVCpu);
     3826        VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
    38563827        if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
    38573828            rcStrict = rcStrict2;
     
    38633834         */
    38643835# ifdef IN_RING0
    3865         rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu,
     3836        rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu,
    38663837                                                              VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
    38673838                                                              30000 /*ms*/);
     
    38913862         */
    38923863# ifdef IN_RING0
    3893         rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
     3864        rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
    38943865        DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType,
    38953866                         pMsgForTrace->Header.MessageType, pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
     
    39273898 * @param   pVM                 The cross context VM structure.
    39283899 * @param   pVCpu               The cross context per CPU structure.
    3929  * @param   pGVCpu              The global (ring-0) per CPU structure.
    39303900 * @param   pfInterruptWindows  Where to return interrupt window flags.
    39313901 */
    3932 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVMCC pVM, PVMCPUCC pVCpu, PGVMCPU pGVCpu, uint8_t *pfInterruptWindows)
     3902NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVMCC pVM, PVMCPUCC pVCpu, uint8_t *pfInterruptWindows)
    39333903{
    39343904    Assert(!TRPMHasTrap(pVCpu));
     
    39613931    if (pVCpu->cpum.GstCtx.fExtrn & fNeedExtrn)
    39623932    {
    3963         VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
    3964                                                                   NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
     3933        VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
    39653934        if (rcStrict != VINF_SUCCESS)
    39663935            return rcStrict;
     
    39773946            && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    39783947        {
    3979             VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
    3980                                                                       NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
     3948            VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
    39813949            if (rcStrict == VINF_SUCCESS)
    39823950            {
     
    40003968        {
    40013969            AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
    4002             VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
    4003                                                                       NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
     3970            VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
    40043971            if (rcStrict == VINF_SUCCESS)
    40053972            {
     
    40354002 * @param   pVM             The cross context VM structure.
    40364003 * @param   pVCpu           The cross context per CPU structure.
    4037  * @param   pGVM            The ring-0 VM structure (NULL in ring-3).
    4038  * @param   pGVCpu          The ring-0 per CPU structure (NULL in ring-3).
    40394004 */
    4040 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVMCC pVM, PVMCPUCC pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
     4005NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVMCC pVM, PVMCPUCC pVCpu)
    40414006{
    40424007    LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
     
    40444009    if (LogIs3Enabled())
    40454010        nemHCWinLogState(pVM, pVCpu);
    4046 # endif
    4047 # ifdef IN_RING0
    4048     Assert(pVCpu->idCpu == pGVCpu->idCpu);
    40494011# endif
    40504012
     
    41024064            {
    41034065                pVCpu->nem.s.fHandleAndGetFlags = 0;
    4104                 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
     4066                rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
    41054067                if (rcStrict == VINF_SUCCESS)
    41064068                { /* likely */ }
     
    41154077
    41164078            /* Try inject interrupt. */
    4117             rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
     4079            rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
    41184080            if (rcStrict == VINF_SUCCESS)
    41194081            { /* likely */ }
     
    41484110# endif
    41494111# ifdef IN_RING0
    4150             int rc2 = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
     4112            int rc2 = nemR0WinExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx);
    41514113# else
    41524114            int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
    4153             RT_NOREF(pGVM, pGVCpu);
    41544115# endif
    41554116            AssertRCReturn(rc2, rc2);
     
    41764137            {
    41774138#  ifdef IN_RING0
    4178                 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
    4179                 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction,
     4139                pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
     4140                NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction,
    41804141                                                        &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
    41814142                                                        NULL, 0);
    41824143                LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
    4183                 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt),
     4144                AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pVCpu->idCpu, rcNt),
    41844145                                      VERR_NEM_IPE_5);
    41854146#  else
     
    42114172                    cMsWait = RT_MS_1SEC;
    42124173#  ifdef IN_RING0
    4213                 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
     4174                pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pVCpu->idCpu;
    42144175                pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = pVCpu->nem.s.fHandleAndGetFlags;
    42154176                pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMsWait;
    4216                 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
     4177                NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
    42174178                                                        &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
    4218                                                         pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
     4179                                                        pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
    42194180                                                        NULL, 0);
    42204181                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
     
    42384199                     */
    42394200# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
    4240                     rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pGVCpu);
     4201                    rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
    42414202                    pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
    42424203# else
     
    43154276    {
    43164277        pVCpu->nem.s.fHandleAndGetFlags = 0;
    4317         rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
     4278        rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
    43184279    }
    43194280# endif
     
    43434304        {
    43444305# ifdef IN_RING0
    4345             int rc2 = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT,
     4306            int rc2 = nemR0WinImportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT,
    43464307                                          true /*fCanUpdateCr3*/);
    43474308            if (RT_SUCCESS(rc2))
  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

    r80334 r80346  
    15411541 *
    15421542 * @param   pGVM            The global (ring-0) VM structure.
    1543  * @param   pVM             The cross context VM structure.
    15441543 * @param   idCpu           The VCPU id - must be zero.
    15451544 * @param   cBasePages      The number of pages that may be allocated for the base RAM and ROMs.
     
    15531552 * @thread  The creator thread / EMT(0).
    15541553 */
    1555 GMMR0DECL(int) GMMR0InitialReservation(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages,
     1554GMMR0DECL(int) GMMR0InitialReservation(PGVM pGVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages,
    15561555                                       uint32_t cFixedPages, GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
    15571556{
    1558     LogFlow(("GMMR0InitialReservation: pGVM=%p pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
    1559              pGVM, pVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));
     1557    LogFlow(("GMMR0InitialReservation: pGVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
     1558             pGVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));
    15601559
    15611560    /*
     
    15651564    PGMM pGMM;
    15661565    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    1567     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     1566    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    15681567    if (RT_FAILURE(rc))
    15691568        return rc;
     
    16191618 * @returns see GMMR0InitialReservation.
    16201619 * @param   pGVM            The global (ring-0) VM structure.
    1621  * @param   pVM             The cross context VM structure.
    16221620 * @param   idCpu           The VCPU id.
    16231621 * @param   pReq            Pointer to the request packet.
    16241622 */
    1625 GMMR0DECL(int) GMMR0InitialReservationReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq)
     1623GMMR0DECL(int) GMMR0InitialReservationReq(PGVM pGVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq)
    16261624{
    16271625    /*
    16281626     * Validate input and pass it on.
    16291627     */
    1630     AssertPtrReturn(pVM, VERR_INVALID_POINTER);
     1628    AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
    16311629    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    16321630    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
    16331631
    1634     return GMMR0InitialReservation(pGVM, pVM, idCpu, pReq->cBasePages, pReq->cShadowPages,
     1632    return GMMR0InitialReservation(pGVM, idCpu, pReq->cBasePages, pReq->cShadowPages,
    16351633                                   pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
    16361634}
     
    16441642 *
    16451643 * @param   pGVM            The global (ring-0) VM structure.
    1646  * @param   pVM             The cross context VM structure.
    16471644 * @param   idCpu           The VCPU id.
    16481645 * @param   cBasePages      The number of pages that may be allocated for the base RAM and ROMs.
     
    16541651 * @thread  EMT(idCpu)
    16551652 */
    1656 GMMR0DECL(int) GMMR0UpdateReservation(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t cBasePages,
     1653GMMR0DECL(int) GMMR0UpdateReservation(PGVM pGVM, VMCPUID idCpu, uint64_t cBasePages,
    16571654                                      uint32_t cShadowPages, uint32_t cFixedPages)
    16581655{
    1659     LogFlow(("GMMR0UpdateReservation: pGVM=%p pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x\n",
    1660              pGVM, pVM, cBasePages, cShadowPages, cFixedPages));
     1656    LogFlow(("GMMR0UpdateReservation: pGVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x\n",
     1657             pGVM, cBasePages, cShadowPages, cFixedPages));
    16611658
    16621659    /*
     
    16651662    PGMM pGMM;
    16661663    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    1667     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     1664    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    16681665    if (RT_FAILURE(rc))
    16691666        return rc;
     
    17161713 * @returns see GMMR0UpdateReservation.
    17171714 * @param   pGVM            The global (ring-0) VM structure.
    1718  * @param   pVM             The cross context VM structure.
    17191715 * @param   idCpu           The VCPU id.
    17201716 * @param   pReq            Pointer to the request packet.
    17211717 */
    1722 GMMR0DECL(int) GMMR0UpdateReservationReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq)
     1718GMMR0DECL(int) GMMR0UpdateReservationReq(PGVM pGVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq)
    17231719{
    17241720    /*
    17251721     * Validate input and pass it on.
    17261722     */
    1727     AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    17281723    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    17291724    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
    17301725
    1731     return GMMR0UpdateReservation(pGVM, pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
     1726    return GMMR0UpdateReservation(pGVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
    17321727}
    17331728
     
    27292724 *
    27302725 * @param   pGVM                The global (ring-0) VM structure.
    2731  * @param   pVM                 The cross context VM structure.
    27322726 * @param   idCpu               The VCPU id.
    27332727 * @param   cPagesToUpdate      The number of pages to update (starting from the head).
     
    27372731 * @thread  EMT(idCpu)
    27382732 */
    2739 GMMR0DECL(int) GMMR0AllocateHandyPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t cPagesToUpdate,
     2733GMMR0DECL(int) GMMR0AllocateHandyPages(PGVM pGVM, VMCPUID idCpu, uint32_t cPagesToUpdate,
    27402734                                       uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
    27412735{
    2742     LogFlow(("GMMR0AllocateHandyPages: pGVM=%p pVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
    2743              pGVM, pVM, cPagesToUpdate, cPagesToAlloc, paPages));
     2736    LogFlow(("GMMR0AllocateHandyPages: pGVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
     2737             pGVM, cPagesToUpdate, cPagesToAlloc, paPages));
    27442738
    27452739    /*
     
    27492743    PGMM pGMM;
    27502744    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    2751     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     2745    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    27522746    if (RT_FAILURE(rc))
    27532747        return rc;
     
    29252919 *
    29262920 * @param   pGVM        The global (ring-0) VM structure.
    2927  * @param   pVM         The cross context VM structure.
    29282921 * @param   idCpu       The VCPU id.
    29292922 * @param   cPages      The number of pages to allocate.
     
    29352928 * @thread  EMT.
    29362929 */
    2937 GMMR0DECL(int) GMMR0AllocatePages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
    2938 {
    2939     LogFlow(("GMMR0AllocatePages: pGVM=%p pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pGVM, pVM, cPages, paPages, enmAccount));
     2930GMMR0DECL(int) GMMR0AllocatePages(PGVM pGVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
     2931{
     2932    LogFlow(("GMMR0AllocatePages: pGVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pGVM, cPages, paPages, enmAccount));
    29402933
    29412934    /*
     
    29442937    PGMM pGMM;
    29452938    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    2946     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     2939    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    29472940    if (RT_FAILURE(rc))
    29482941        return rc;
     
    29912984 * @returns see GMMR0AllocatePages.
    29922985 * @param   pGVM        The global (ring-0) VM structure.
    2993  * @param   pVM         The cross context VM structure.
    29942986 * @param   idCpu       The VCPU id.
    29952987 * @param   pReq        Pointer to the request packet.
    29962988 */
    2997 GMMR0DECL(int) GMMR0AllocatePagesReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq)
     2989GMMR0DECL(int) GMMR0AllocatePagesReq(PGVM pGVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq)
    29982990{
    29992991    /*
     
    30083000                    VERR_INVALID_PARAMETER);
    30093001
    3010     return GMMR0AllocatePages(pGVM, pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
     3002    return GMMR0AllocatePages(pGVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
    30113003}
    30123004
     
    30273019 *
    30283020 * @param   pGVM        The global (ring-0) VM structure.
    3029  * @param   pVM         The cross context VM structure.
    30303021 * @param   idCpu       The VCPU id.
    30313022 * @param   cbPage      Large page size.
     
    30333024 * @param   pHCPhys     Where to return the host physical address of the page.
    30343025 */
    3035 GMMR0DECL(int)  GMMR0AllocateLargePage(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
    3036 {
    3037     LogFlow(("GMMR0AllocateLargePage: pGVM=%p pVM=%p cbPage=%x\n", pGVM, pVM, cbPage));
     3026GMMR0DECL(int)  GMMR0AllocateLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
     3027{
     3028    LogFlow(("GMMR0AllocateLargePage: pGVM=%p cbPage=%x\n", pGVM, cbPage));
    30383029
    30393030    AssertReturn(cbPage == GMM_CHUNK_SIZE, VERR_INVALID_PARAMETER);
     
    30463037    PGMM pGMM;
    30473038    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    3048     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     3039    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    30493040    if (RT_FAILURE(rc))
    30503041        return rc;
     
    31343125 * @returns VBox status code:
    31353126 * @param   pGVM        The global (ring-0) VM structure.
    3136  * @param   pVM         The cross context VM structure.
    31373127 * @param   idCpu       The VCPU id.
    31383128 * @param   idPage      The large page id.
    31393129 */
    3140 GMMR0DECL(int)  GMMR0FreeLargePage(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t idPage)
    3141 {
    3142     LogFlow(("GMMR0FreeLargePage: pGVM=%p pVM=%p idPage=%x\n", pGVM, pVM, idPage));
     3130GMMR0DECL(int)  GMMR0FreeLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t idPage)
     3131{
     3132    LogFlow(("GMMR0FreeLargePage: pGVM=%p idPage=%x\n", pGVM, idPage));
    31433133
    31443134    /*
     
    31473137    PGMM pGMM;
    31483138    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    3149     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     3139    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    31503140    if (RT_FAILURE(rc))
    31513141        return rc;
     
    32013191 * @returns see GMMR0FreeLargePage.
    32023192 * @param   pGVM        The global (ring-0) VM structure.
    3203  * @param   pVM         The cross context VM structure.
    32043193 * @param   idCpu       The VCPU id.
    32053194 * @param   pReq        Pointer to the request packet.
    32063195 */
    3207 GMMR0DECL(int) GMMR0FreeLargePageReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq)
     3196GMMR0DECL(int) GMMR0FreeLargePageReq(PGVM pGVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq)
    32083197{
    32093198    /*
     
    32153204                    VERR_INVALID_PARAMETER);
    32163205
    3217     return GMMR0FreeLargePage(pGVM, pVM, idCpu, pReq->idPage);
     3206    return GMMR0FreeLargePage(pGVM, idCpu, pReq->idPage);
    32183207}
    32193208
     
    35623551 *
    35633552 * @param   pGVM        The global (ring-0) VM structure.
    3564  * @param   pVM         The cross context VM structure.
    35653553 * @param   idCpu       The VCPU id.
    35663554 * @param   cPages      The number of pages to allocate.
     
    35703558 * @thread  EMT.
    35713559 */
    3572 GMMR0DECL(int) GMMR0FreePages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
    3573 {
    3574     LogFlow(("GMMR0FreePages: pGVM=%p pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pGVM, pVM, cPages, paPages, enmAccount));
     3560GMMR0DECL(int) GMMR0FreePages(PGVM pGVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
     3561{
     3562    LogFlow(("GMMR0FreePages: pGVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pGVM, cPages, paPages, enmAccount));
    35753563
    35763564    /*
     
    35793567    PGMM pGMM;
    35803568    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    3581     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     3569    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    35823570    if (RT_FAILURE(rc))
    35833571        return rc;
     
    36143602 * @returns see GMMR0FreePages.
    36153603 * @param   pGVM        The global (ring-0) VM structure.
    3616  * @param   pVM         The cross context VM structure.
    36173604 * @param   idCpu       The VCPU id.
    36183605 * @param   pReq        Pointer to the request packet.
    36193606 */
    3620 GMMR0DECL(int) GMMR0FreePagesReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq)
     3607GMMR0DECL(int) GMMR0FreePagesReq(PGVM pGVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq)
    36213608{
    36223609    /*
     
    36313618                    VERR_INVALID_PARAMETER);
    36323619
    3633     return GMMR0FreePages(pGVM, pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
     3620    return GMMR0FreePages(pGVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
    36343621}
    36353622
     
    36523639 *
    36533640 * @param   pGVM                The global (ring-0) VM structure.
    3654  * @param   pVM                 The cross context VM structure.
    36553641 * @param   idCpu               The VCPU id.
    36563642 * @param   enmAction           Inflate/deflate/reset.
     
    36593645 * @thread  EMT(idCpu)
    36603646 */
    3661 GMMR0DECL(int) GMMR0BalloonedPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
    3662 {
    3663     LogFlow(("GMMR0BalloonedPages: pGVM=%p pVM=%p enmAction=%d cBalloonedPages=%#x\n",
    3664              pGVM, pVM, enmAction, cBalloonedPages));
     3647GMMR0DECL(int) GMMR0BalloonedPages(PGVM pGVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
     3648{
     3649    LogFlow(("GMMR0BalloonedPages: pGVM=%p enmAction=%d cBalloonedPages=%#x\n",
     3650             pGVM, enmAction, cBalloonedPages));
    36653651
    36663652    AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
     
    36713657    PGMM pGMM;
    36723658    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    3673     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     3659    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    36743660    if (RT_FAILURE(rc))
    36753661        return rc;
     
    37843770 * @returns see GMMR0BalloonedPages.
    37853771 * @param   pGVM        The global (ring-0) VM structure.
    3786  * @param   pVM         The cross context VM structure.
    37873772 * @param   idCpu       The VCPU id.
    37883773 * @param   pReq        Pointer to the request packet.
    37893774 */
    3790 GMMR0DECL(int) GMMR0BalloonedPagesReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq)
     3775GMMR0DECL(int) GMMR0BalloonedPagesReq(PGVM pGVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq)
    37913776{
    37923777    /*
     
    37983783                    VERR_INVALID_PARAMETER);
    37993784
    3800     return GMMR0BalloonedPages(pGVM, pVM, idCpu, pReq->enmAction, pReq->cBalloonedPages);
     3785    return GMMR0BalloonedPages(pGVM, idCpu, pReq->enmAction, pReq->cBalloonedPages);
    38013786}
    38023787
     
    38393824 * @returns VBox status code.
    38403825 * @param   pGVM        The global (ring-0) VM structure.
    3841  * @param   pVM         The cross context VM structure.
    38423826 * @param   idCpu       Cpu id.
    38433827 * @param   pReq        Pointer to the request packet.
     
    38453829 * @thread  EMT(idCpu)
    38463830 */
    3847 GMMR0DECL(int) GMMR0QueryMemoryStatsReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq)
     3831GMMR0DECL(int) GMMR0QueryMemoryStatsReq(PGVM pGVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq)
    38483832{
    38493833    /*
     
    38603844    PGMM pGMM;
    38613845    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    3862     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     3846    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    38633847    if (RT_FAILURE(rc))
    38643848        return rc;
     
    41314115 * @returns VBox status code.
    41324116 * @param   pGVM            The global (ring-0) VM structure.
    4133  * @param   pVM             The cross context VM structure.
    41344117 * @param   idChunkMap      The chunk to map. NIL_GMM_CHUNKID if nothing to map.
    41354118 * @param   idChunkUnmap    The chunk to unmap. NIL_GMM_CHUNKID if nothing to unmap.
     
    41374120 * @thread  EMT ???
    41384121 */
    4139 GMMR0DECL(int) GMMR0MapUnmapChunk(PGVM pGVM, PVMCC pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
    4140 {
    4141     LogFlow(("GMMR0MapUnmapChunk: pGVM=%p pVM=%p idChunkMap=%#x idChunkUnmap=%#x ppvR3=%p\n",
    4142              pGVM, pVM, idChunkMap, idChunkUnmap, ppvR3));
     4122GMMR0DECL(int) GMMR0MapUnmapChunk(PGVM pGVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
     4123{
     4124    LogFlow(("GMMR0MapUnmapChunk: pGVM=%p idChunkMap=%#x idChunkUnmap=%#x ppvR3=%p\n",
     4125             pGVM, idChunkMap, idChunkUnmap, ppvR3));
    41434126
    41444127    /*
     
    41474130    PGMM pGMM;
    41484131    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    4149     int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
     4132    int rc = GVMMR0ValidateGVM(pGVM);
    41504133    if (RT_FAILURE(rc))
    41514134        return rc;
     
    42234206 * @returns see GMMR0MapUnmapChunk.
    42244207 * @param   pGVM        The global (ring-0) VM structure.
    4225  * @param   pVM         The cross context VM structure.
    42264208 * @param   pReq        Pointer to the request packet.
    42274209 */
    4228 GMMR0DECL(int)  GMMR0MapUnmapChunkReq(PGVM pGVM, PVMCC pVM, PGMMMAPUNMAPCHUNKREQ pReq)
     4210GMMR0DECL(int)  GMMR0MapUnmapChunkReq(PGVM pGVM, PGMMMAPUNMAPCHUNKREQ pReq)
    42294211{
    42304212    /*
     
    42344216    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
    42354217
    4236     return GMMR0MapUnmapChunk(pGVM, pVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
     4218    return GMMR0MapUnmapChunk(pGVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
    42374219}
    42384220
     
    42464228 * @returns VBox status code.
    42474229 * @param   pGVM        The global (ring-0) VM structure.
    4248  * @param   pVM         The cross context VM structure.
    42494230 * @param   idCpu       The VCPU id.
    42504231 * @param   pvR3        Pointer to the chunk size memory block to lock down.
    42514232 */
    4252 GMMR0DECL(int) GMMR0SeedChunk(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, RTR3PTR pvR3)
     4233GMMR0DECL(int) GMMR0SeedChunk(PGVM pGVM, VMCPUID idCpu, RTR3PTR pvR3)
    42534234{
    42544235    /*
     
    42574238    PGMM pGMM;
    42584239    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    4259     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     4240    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    42604241    if (RT_FAILURE(rc))
    42614242        return rc;
     
    45304511 * @returns VBox status code.
    45314512 * @param   pGVM            The global (ring-0) VM structure.
    4532  * @param   pVM             The cross context VM structure.
    45334513 * @param   idCpu           The VCPU id.
    45344514 * @param   enmGuestOS      The guest OS type.
     
    45414521 * @thread  EMT(idCpu)
    45424522 */
    4543 GMMR0DECL(int) GMMR0RegisterSharedModule(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName,
     4523GMMR0DECL(int) GMMR0RegisterSharedModule(PGVM pGVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName,
    45444524                                         char *pszVersion, RTGCPTR GCPtrModBase, uint32_t cbModule,
    45454525                                         uint32_t cRegions, struct VMMDEVSHAREDREGIONDESC const *paRegions)
     
    45544534    PGMM pGMM;
    45554535    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    4556     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     4536    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    45574537    if (RT_FAILURE(rc))
    45584538        return rc;
     
    46664646#else
    46674647
    4668     NOREF(pGVM); NOREF(pVM); NOREF(idCpu); NOREF(enmGuestOS); NOREF(pszModuleName); NOREF(pszVersion);
     4648    NOREF(pGVM); NOREF(idCpu); NOREF(enmGuestOS); NOREF(pszModuleName); NOREF(pszVersion);
    46694649    NOREF(GCPtrModBase); NOREF(cbModule); NOREF(cRegions); NOREF(paRegions);
    46704650    return VERR_NOT_IMPLEMENTED;
     
    46784658 * @returns see GMMR0RegisterSharedModule.
    46794659 * @param   pGVM        The global (ring-0) VM structure.
    4680  * @param   pVM         The cross context VM structure.
    46814660 * @param   idCpu       The VCPU id.
    46824661 * @param   pReq        Pointer to the request packet.
    46834662 */
    4684 GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)
     4663GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PGVM pGVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)
    46854664{
    46864665    /*
     
    46934672
    46944673    /* Pass back return code in the request packet to preserve informational codes. (VMMR3CallR0 chokes on them) */
    4695     pReq->rc = GMMR0RegisterSharedModule(pGVM, pVM, idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion,
     4674    pReq->rc = GMMR0RegisterSharedModule(pGVM, idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion,
    46964675                                         pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
    46974676    return VINF_SUCCESS;
     
    47044683 * @returns VBox status code.
    47054684 * @param   pGVM            The global (ring-0) VM structure.
    4706  * @param   pVM             The cross context VM structure.
    47074685 * @param   idCpu           The VCPU id.
    47084686 * @param   pszModuleName   The module name.
     
    47114689 * @param   cbModule        The module size.
    47124690 */
    4713 GMMR0DECL(int) GMMR0UnregisterSharedModule(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion,
     4691GMMR0DECL(int) GMMR0UnregisterSharedModule(PGVM pGVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion,
    47144692                                           RTGCPTR GCPtrModBase, uint32_t cbModule)
    47154693{
     
    47204698    PGMM pGMM;
    47214699    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    4722     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     4700    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    47234701    if (RT_FAILURE(rc))
    47244702        return rc;
     
    47634741#else
    47644742
    4765     NOREF(pGVM); NOREF(pVM); NOREF(idCpu); NOREF(pszModuleName); NOREF(pszVersion); NOREF(GCPtrModBase); NOREF(cbModule);
     4743    NOREF(pGVM); NOREF(idCpu); NOREF(pszModuleName); NOREF(pszVersion); NOREF(GCPtrModBase); NOREF(cbModule);
    47664744    return VERR_NOT_IMPLEMENTED;
    47674745#endif
     
    47744752 * @returns see GMMR0UnregisterSharedModule.
    47754753 * @param   pGVM        The global (ring-0) VM structure.
    4776  * @param   pVM         The cross context VM structure.
    47774754 * @param   idCpu       The VCPU id.
    47784755 * @param   pReq        Pointer to the request packet.
    47794756 */
    4780 GMMR0DECL(int)  GMMR0UnregisterSharedModuleReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)
     4757GMMR0DECL(int)  GMMR0UnregisterSharedModuleReq(PGVM pGVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)
    47814758{
    47824759    /*
     
    47864763    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
    47874764
    4788     return GMMR0UnregisterSharedModule(pGVM, pVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);
     4765    return GMMR0UnregisterSharedModule(pGVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);
    47894766}
    47904767
     
    50815058 * @returns VBox status code.
    50825059 * @param   pGVM        The global (ring-0) VM structure.
    5083  * @param   pVM         The cross context VM structure.
    50845060 * @param   idCpu       The VCPU id.
    50855061 */
    5086 GMMR0DECL(int) GMMR0ResetSharedModules(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     5062GMMR0DECL(int) GMMR0ResetSharedModules(PGVM pGVM, VMCPUID idCpu)
    50875063{
    50885064#ifdef VBOX_WITH_PAGE_SHARING
     
    50925068    PGMM pGMM;
    50935069    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    5094     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     5070    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    50955071    if (RT_FAILURE(rc))
    50965072        return rc;
     
    51185094    return rc;
    51195095#else
    5120     RT_NOREF(pGVM, pVM, idCpu);
     5096    RT_NOREF(pGVM, idCpu);
    51215097    return VERR_NOT_IMPLEMENTED;
    51225098#endif
     
    51505126 * @returns VBox status code.
    51515127 * @param   pGVM        The global (ring-0) VM structure.
    5152  * @param   pVM         The cross context VM structure.
    51535128 * @param   idCpu       The calling EMT number.
    51545129 * @thread  EMT(idCpu)
    51555130 */
    5156 GMMR0DECL(int) GMMR0CheckSharedModules(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     5131GMMR0DECL(int) GMMR0CheckSharedModules(PGVM pGVM, VMCPUID idCpu)
    51575132{
    51585133#ifdef VBOX_WITH_PAGE_SHARING
     
    51625137    PGMM pGMM;
    51635138    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    5164     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     5139    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    51655140    if (RT_FAILURE(rc))
    51665141        return rc;
     
    51955170    return rc;
    51965171#else
    5197     RT_NOREF(pGVM, pVM, idCpu);
     5172    RT_NOREF(pGVM, idCpu);
    51985173    return VERR_NOT_IMPLEMENTED;
    51995174#endif
     
    52525227 * @returns VBox status code.
    52535228 * @param   pGVM        The global (ring-0) VM structure.
    5254  * @param   pVM         The cross context VM structure.
    52555229 * @param   pReq        Pointer to the request packet.
    52565230 */
    5257 GMMR0DECL(int) GMMR0FindDuplicatePageReq(PGVM pGVM, PVMCC pVM, PGMMFINDDUPLICATEPAGEREQ pReq)
     5231GMMR0DECL(int) GMMR0FindDuplicatePageReq(PGVM pGVM, PGMMFINDDUPLICATEPAGEREQ pReq)
    52585232{
    52595233    /*
     
    52665240    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    52675241
    5268     int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
     5242    int rc = GVMMR0ValidateGVM(pGVM);
    52695243    if (RT_FAILURE(rc))
    52705244        return rc;
     
    53255299 * @param   pSession    The current session.
    53265300 * @param   pGVM        The GVM to obtain statistics for. Optional.
    5327  * @param   pVM         The VM structure corresponding to @a pGVM.
    5328  */
    5329 GMMR0DECL(int) GMMR0QueryStatistics(PGMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM, PVMCC pVM)
    5330 {
    5331     LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p pVM=%p\n", pStats, pSession, pGVM, pVM));
     5301 */
     5302GMMR0DECL(int) GMMR0QueryStatistics(PGMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
     5303{
     5304    LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM));
    53325305
    53335306    /*
     
    53475320    if (pGVM)
    53485321    {
    5349         rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
     5322        rc = GVMMR0ValidateGVM(pGVM);
    53505323        if (RT_FAILURE(rc))
    53515324            return rc;
     
    53905363 * @returns see GMMR0QueryStatistics.
    53915364 * @param   pGVM        The global (ring-0) VM structure. Optional.
    5392  * @param   pVM         The cross context VM structure. Optional.
    53935365 * @param   pReq        Pointer to the request packet.
    53945366 */
    5395 GMMR0DECL(int) GMMR0QueryStatisticsReq(PGVM pGVM, PVMCC pVM, PGMMQUERYSTATISTICSSREQ pReq)
     5367GMMR0DECL(int) GMMR0QueryStatisticsReq(PGVM pGVM, PGMMQUERYSTATISTICSSREQ pReq)
    53965368{
    53975369    /*
     
    54015373    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
    54025374
    5403     return GMMR0QueryStatistics(&pReq->Stats, pReq->pSession, pGVM, pVM);
     5375    return GMMR0QueryStatistics(&pReq->Stats, pReq->pSession, pGVM);
    54045376}
    54055377
     
    54145386 * @param   pSession    The current session.
    54155387 * @param   pGVM        The GVM to reset statistics for. Optional.
    5416  * @param   pVM         The VM structure corresponding to @a pGVM.
    5417  */
    5418 GMMR0DECL(int) GMMR0ResetStatistics(PCGMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM, PVMCC pVM)
    5419 {
    5420     NOREF(pStats); NOREF(pSession); NOREF(pVM); NOREF(pGVM);
     5388 */
     5389GMMR0DECL(int) GMMR0ResetStatistics(PCGMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
     5390{
     5391    NOREF(pStats); NOREF(pSession); NOREF(pGVM);
    54215392    /* Currently nothing we can reset at the moment. */
    54225393    return VINF_SUCCESS;
     
    54295400 * @returns see GMMR0ResetStatistics.
    54305401 * @param   pGVM        The global (ring-0) VM structure. Optional.
    5431  * @param   pVM         The cross context VM structure. Optional.
    54325402 * @param   pReq        Pointer to the request packet.
    54335403 */
    5434 GMMR0DECL(int) GMMR0ResetStatisticsReq(PGVM pGVM, PVMCC pVM, PGMMRESETSTATISTICSSREQ pReq)
     5404GMMR0DECL(int) GMMR0ResetStatisticsReq(PGVM pGVM, PGMMRESETSTATISTICSSREQ pReq)
    54355405{
    54365406    /*
     
    54405410    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
    54415411
    5442     return GMMR0ResetStatistics(&pReq->Stats, pReq->pSession, pGVM, pVM);
    5443 }
    5444 
     5412    return GMMR0ResetStatistics(&pReq->Stats, pReq->pSession, pGVM);
     5413}
     5414
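All of the GMMR0 entry points above drop their PVMCC parameter: in ring-0 the PGVM and PVMCC pointers refer to the same allocation (the old code even asserted pGVM == pVM before this change), so passing both was redundant. A small, self-contained C sketch of that de-duplication pattern, under that aliasing assumption and with hypothetical types (not VirtualBox code):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical layout: the ring-0 structure begins with the shared one,
     * so the two pointers alias the same memory (illustration only). */
    typedef struct SHAREDVM { uint32_t hSelf; uint32_t cCpus; } SHAREDVM;
    typedef struct RING0VM  { SHAREDVM shared; uint32_t cEmts; } RING0VM;

    /* Old shape: both parameters, plus a check that they really alias each other. */
    static int allocPagesOld(RING0VM *pR0, SHAREDVM *pShared, uint32_t cPages)
    {
        assert((void *)pR0 == (void *)pShared);   /* always true by construction */
        return (pR0->shared.cCpus && cPages) ? 0 : -1;
    }

    /* New shape: the shared view is derived from the single ring-0 handle. */
    static int allocPagesNew(RING0VM *pR0, uint32_t cPages)
    {
        SHAREDVM *pShared = &pR0->shared;         /* same memory, no second parameter */
        return (pShared->cCpus && cPages) ? 0 : -1;
    }

    int main(void)
    {
        RING0VM vm = { { 1, 2 }, 2 };
        int rcOld = allocPagesOld(&vm, &vm.shared, 16);
        int rcNew = allocPagesNew(&vm, 16);
        return (rcOld == 0 && rcNew == 0) ? 0 : 1;
    }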
  • trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp

    r80336 r80346  
    119119        } \
    120120    } while (0)
    121 # define GVMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
     121# define GVMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
    122122    do { \
    123123        if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
     
    128128            else \
    129129            { \
    130                 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
     130                SUPR0BadContext((a_pGVM) ? (a_pGVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
    131131                a_BadExpr; \
    132132            } \
     
    136136# define GVMM_CHECK_SMAP_SETUP()           uint32_t const fKernelFeatures = 0
    137137# define GVMM_CHECK_SMAP_CHECK(a_BadExpr)           NOREF(fKernelFeatures)
    138 # define GVMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr)   NOREF(fKernelFeatures)
     138# define GVMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr)   NOREF(fKernelFeatures)
    139139#endif
    140140
     
    159159    /** The pointer to the ring-0 only (aka global) VM structure. */
    160160    PGVM                pGVM;
    161     /** The ring-0 mapping of the shared VM instance data. */
    162     PVMCC                 pVM;
     161    /** The ring-0 mapping of the shared VM instance data.
     162     * @todo remove this  */
     163    PVMCC               pVMRemoveMe;
    163164    /** The virtual machine object. */
    164165    void               *pvObj;
     
    361362static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession);
    362363static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);
    363 static int gvmmR0ByGVMandVM(PGVM pGVM, PVMCC pVM, PGVMM *ppGVMM, bool fTakeUsedLock);
    364 static int gvmmR0ByGVMandVMandEMT(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGVMM *ppGVMM);
     364static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock);
     365static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM);
    365366
    366367#ifdef GVMM_SCHED_WITH_PPT
     
    528529    PGVMM pGVMM = g_pGVMM;
    529530    g_pGVMM = NULL;
    530     if (RT_UNLIKELY(!VALID_PTR(pGVMM)))
     531    if (RT_UNLIKELY(!RT_VALID_PTR(pGVMM)))
    531532    {
    532533        SUPR0Printf("GVMMR0Term: pGVMM=%RKv\n", pGVMM);
     
    779780     * Validate the request.
    780781     */
    781     if (!VALID_PTR(pReq))
     782    if (!RT_VALID_PTR(pReq))
    782783        return VERR_INVALID_POINTER;
    783784    if (pReq->Hdr.cbReq != sizeof(*pReq))
     
    789790     * Execute it.
    790791     */
    791     PVMCC pVM;
     792    PGVM pGVM;
    792793    pReq->pVMR0 = NULL;
    793794    pReq->pVMR3 = NIL_RTR3PTR;
    794     int rc = GVMMR0CreateVM(pSession, pReq->cCpus, &pVM);
     795    int rc = GVMMR0CreateVM(pSession, pReq->cCpus, &pGVM);
    795796    if (RT_SUCCESS(rc))
    796797    {
    797         pReq->pVMR0 = pVM;
    798         pReq->pVMR3 = pVM->pVMR3;
     798        pReq->pVMR0 = pGVM; /** @todo don't expose this to ring-3, use a unique random number instead. */
     799        pReq->pVMR3 = pGVM->pVMR3;
    799800    }
    800801    return rc;
     
    810811 * @param   pSession    The support driver session.
    811812 * @param   cCpus       Number of virtual CPUs for the new VM.
    812  * @param   ppVM        Where to store the pointer to the VM structure.
     813 * @param   ppGVM       Where to store the pointer to the VM structure.
    813814 *
    814815 * @thread  EMT.
    815816 */
    816 GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCpus, PVMCC *ppVM)
     817GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCpus, PGVM *ppGVM)
    817818{
    818819    LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession));
     
    820821    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
    821822
    822     AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
    823     *ppVM = NULL;
     823    AssertPtrReturn(ppGVM, VERR_INVALID_POINTER);
     824    *ppGVM = NULL;
    824825
    825826    if (    cCpus == 0
     
    857858
    858859        /* consistency checks, a bit paranoid as always. */
    859         if (    !pHandle->pVM
     860        if (    !pHandle->pVMRemoveMe
    860861            &&  !pHandle->pGVM
    861862            &&  !pHandle->pvObj
     
    876877                pGVMM->cVMs++;
    877878
    878                 pHandle->pVM      = NULL;
     879                pHandle->pVMRemoveMe = NULL;
    879880                pHandle->pGVM     = NULL;
    880881                pHandle->pSession = pSession;
     
    967968                                AssertRC(rc);
    968969
    969                                 pHandle->pVM                    = pGVM;
     970                                pHandle->pVMRemoveMe            = pGVM;
    970971                                pHandle->pGVM                   = pGVM;
    971972                                pHandle->hEMT0                  = hEMT0;
     
    994995                                        CPUMR0RegisterVCpuThread(&pGVM->aCpus[0]);
    995996
    996                                         *ppVM = pGVM;
     997                                        *ppGVM = pGVM;
    997998                                        Log(("GVMMR0CreateVM: pVMR3=%p pGVM=%p hGVM=%d\n", pVMR3, pGVM, iHandle));
    998999                                        return VINF_SUCCESS;
     
    11921193 * @returns VBox status code.
    11931194 * @param   pGVM        The global (ring-0) VM structure.
    1194  * @param   pVM         The cross context VM structure.
    11951195 *
    11961196 * @thread  EMT(0) if it's associated with the VM, otherwise any thread.
    11971197 */
    1198 GVMMR0DECL(int) GVMMR0DestroyVM(PGVM pGVM, PVMCC pVM)
    1199 {
    1200     LogFlow(("GVMMR0DestroyVM: pGVM=%p pVM=%p\n", pGVM, pVM));
     1198GVMMR0DECL(int) GVMMR0DestroyVM(PGVM pGVM)
     1199{
     1200    LogFlow(("GVMMR0DestroyVM: pGVM=%p\n", pGVM));
    12011201    PGVMM pGVMM;
    12021202    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
     
    12061206     */
    12071207    AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
    1208     AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    1209     AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
    1210     AssertReturn(pGVM == pVM, VERR_INVALID_POINTER);
    1211     AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState),
     1208    AssertReturn(!((uintptr_t)pGVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
     1209    AssertMsgReturn(pGVM->enmVMState >= VMSTATE_CREATING && pGVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pGVM->enmVMState),
    12121210                    VERR_WRONG_ORDER);
    12131211
     
    12181216
    12191217    PGVMHANDLE      pHandle = &pGVMM->aHandles[hGVM];
    1220     AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);
     1218    AssertReturn(pHandle->pGVM == pGVM, VERR_NOT_OWNER);
    12211219
    12221220    RTPROCESS       ProcId = RTProcSelf();
     
    12351233
    12361234    /* Be careful here because we might theoretically be racing someone else cleaning up. */
    1237     if (   pHandle->pVM == pVM
     1235    if (   pHandle->pGVM == pGVM
    12381236        && (   (   pHandle->hEMT0  == hSelf
    12391237                && pHandle->ProcId == ProcId)
    12401238            || pHandle->hEMT0 == NIL_RTNATIVETHREAD)
    1241         && VALID_PTR(pHandle->pvObj)
    1242         && VALID_PTR(pHandle->pSession)
    1243         && VALID_PTR(pHandle->pGVM)
     1239        && RT_VALID_PTR(pHandle->pvObj)
     1240        && RT_VALID_PTR(pHandle->pSession)
     1241        && RT_VALID_PTR(pHandle->pGVM)
    12441242        && pHandle->pGVM->u32Magic == GVM_MAGIC)
    12451243    {
     
    12651263    else
    12661264    {
    1267         SUPR0Printf("GVMMR0DestroyVM: pHandle=%RKv:{.pVM=%p, .hEMT0=%p, .ProcId=%u, .pvObj=%p} pVM=%p hSelf=%p\n",
    1268                     pHandle, pHandle->pVM, pHandle->hEMT0, pHandle->ProcId, pHandle->pvObj, pVM, hSelf);
     1265        SUPR0Printf("GVMMR0DestroyVM: pHandle=%RKv:{.pGVM=%p, .hEMT0=%p, .ProcId=%u, .pvObj=%p} pGVM=%p hSelf=%p\n",
     1266                    pHandle, pHandle->pGVM, pHandle->hEMT0, pHandle->ProcId, pHandle->pvObj, pGVM, hSelf);
    12691267        gvmmR0CreateDestroyUnlock(pGVMM);
    12701268        rc = VERR_GVMM_IPE_2;
     
    12891287        {
    12901288            LogFlow(("gvmmR0CleanupVM: Calling VMMR0TermVM\n"));
    1291             VMMR0TermVM(pGVM, pGVM, NIL_VMCPUID);
     1289            VMMR0TermVM(pGVM, NIL_VMCPUID);
    12921290        }
    12931291        else
     
    13991397     */
    14001398    PGVM pGVM = pHandle->pGVM;
    1401     if (   VALID_PTR(pGVM)
     1399    if (   RT_VALID_PTR(pGVM)
    14021400        && pGVM->u32Magic == GVM_MAGIC)
    14031401    {
     
    14651463    pGVMM->iFreeHead = iHandle;
    14661464    ASMAtomicWriteNullPtr(&pHandle->pGVM);
    1467     ASMAtomicWriteNullPtr(&pHandle->pVM);
     1465    ASMAtomicWriteNullPtr(&pHandle->pVMRemoveMe);
    14681466    ASMAtomicWriteNullPtr(&pHandle->pvObj);
    14691467    ASMAtomicWriteNullPtr(&pHandle->pSession);
     
    14841482 * @returns VBox status code
    14851483 * @param   pGVM        The global (ring-0) VM structure.
    1486  * @param   pVM         The cross context VM structure.
    14871484 * @param   idCpu       VCPU id to register the current thread as.
    14881485 */
    1489 GVMMR0DECL(int) GVMMR0RegisterVCpu(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     1486GVMMR0DECL(int) GVMMR0RegisterVCpu(PGVM pGVM, VMCPUID idCpu)
    14901487{
    14911488    AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION);
     
    14951492     */
    14961493    PGVMM pGVMM;
    1497     int rc = gvmmR0ByGVMandVM(pGVM, pVM, &pGVMM, false /* fTakeUsedLock */); /** @todo take lock here. */
     1494    int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /* fTakeUsedLock */); /** @todo take lock here. */
    14981495    if (RT_SUCCESS(rc))
    14991496    {
     
    15401537 * @returns VBox status code
    15411538 * @param   pGVM        The global (ring-0) VM structure.
    1542  * @param   pVM         The cross context VM structure.
    15431539 * @param   idCpu       VCPU id to register the current thread as.
    15441540 */
    1545 GVMMR0DECL(int) GVMMR0DeregisterVCpu(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     1541GVMMR0DECL(int) GVMMR0DeregisterVCpu(PGVM pGVM, VMCPUID idCpu)
    15461542{
    15471543    AssertReturn(idCpu != 0, VERR_INVALID_FUNCTION);
     
    15511547     */
    15521548    PGVMM pGVMM;
    1553     int rc = gvmmR0ByGVMandVMandEMT(pGVM, pVM, idCpu, &pGVMM);
     1549    int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
    15541550    if (RT_SUCCESS(rc))
    15551551    {
     
    16061602     */
    16071603    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    1608     AssertPtrReturn(pHandle->pVM, NULL);
     1604    AssertPtrReturn(pHandle->pVMRemoveMe, NULL);
    16091605    AssertPtrReturn(pHandle->pvObj, NULL);
    16101606    PGVM pGVM = pHandle->pGVM;
    16111607    AssertPtrReturn(pGVM, NULL);
    1612     AssertReturn(pGVM == pHandle->pVM, NULL);
    1613 
    1614     return pHandle->pGVM;
    1615 }
    1616 
    1617 
    1618 /**
    1619  * Lookup a GVM structure by the shared VM structure.
     1608
     1609    return pGVM;
     1610}
     1611
     1612
     1613/**
     1614 * Check that the given GVM and VM structures match up.
    16201615 *
    16211616 * The calling thread must be in the same process as the VM. All current lookups
     
    16231618 *
    16241619 * @returns VBox status code.
    1625  * @param   pVM             The cross context VM structure.
    1626  * @param   ppGVM           Where to store the GVM pointer.
     1620 * @param   pGVM            The global (ring-0) VM structure.
    16271621 * @param   ppGVMM          Where to store the pointer to the GVMM instance data.
    16281622 * @param   fTakeUsedLock   Whether to take the used lock or not.  We take it in
     
    16321626 *                          possible that the VM will disappear then!
    16331627 *
    1634  * @remark  This will not assert on an invalid pVM but try return silently.
    1635  */
    1636 static int gvmmR0ByVM(PVMCC pVM, PGVM *ppGVM, PGVMM *ppGVMM, bool fTakeUsedLock)
    1637 {
    1638     RTPROCESS ProcId = RTProcSelf();
    1639     PGVMM pGVMM;
    1640     GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
    1641 
    1642     /*
    1643      * Validate.
    1644      */
    1645     if (RT_UNLIKELY(    !VALID_PTR(pVM)
    1646                     ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
    1647         return VERR_INVALID_POINTER;
    1648     if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
    1649                     ||  pVM->enmVMState >= VMSTATE_TERMINATED))
    1650         return VERR_INVALID_POINTER;
    1651 
    1652     uint16_t hGVM = pVM->hSelf;
    1653     ASMCompilerBarrier();
    1654     if (RT_UNLIKELY(    hGVM == NIL_GVM_HANDLE
    1655                     ||  hGVM >= RT_ELEMENTS(pGVMM->aHandles)))
    1656         return VERR_INVALID_HANDLE;
    1657 
    1658     /*
    1659      * Look it up.
    1660      */
    1661     PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    1662     PGVM pGVM;
    1663     if (fTakeUsedLock)
    1664     {
    1665         int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
    1666         AssertRCReturn(rc, rc);
    1667 
    1668         pGVM = pHandle->pGVM;
    1669         if (RT_UNLIKELY(    pHandle->pVM != pVM
    1670                         ||  pHandle->ProcId != ProcId
    1671                         ||  !VALID_PTR(pHandle->pvObj)
    1672                         ||  !VALID_PTR(pGVM)
    1673                         ||  pGVM != pVM))
    1674         {
    1675             GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    1676             return VERR_INVALID_HANDLE;
    1677         }
    1678     }
    1679     else
    1680     {
    1681         if (RT_UNLIKELY(pHandle->pVM != pVM))
    1682             return VERR_INVALID_HANDLE;
    1683         if (RT_UNLIKELY(pHandle->ProcId != ProcId))
    1684             return VERR_INVALID_HANDLE;
    1685         if (RT_UNLIKELY(!VALID_PTR(pHandle->pvObj)))
    1686             return VERR_INVALID_HANDLE;
    1687 
    1688         pGVM = pHandle->pGVM;
    1689         if (RT_UNLIKELY(!VALID_PTR(pGVM)))
    1690             return VERR_INVALID_HANDLE;
    1691         if (RT_UNLIKELY(pGVM != pVM))
    1692             return VERR_INVALID_HANDLE;
    1693     }
    1694 
    1695     *ppGVM = pGVM;
    1696     *ppGVMM = pGVMM;
    1697     return VINF_SUCCESS;
    1698 }
    1699 
    1700 
    1701 /**
    1702  * Fast look up a GVM structure by the cross context VM structure.
    1703  *
    1704  * This is mainly used a glue function, so performance is .
    1705  *
    1706  * @returns GVM on success, NULL on failure.
    1707  * @param   pVM             The cross context VM structure.  ASSUMES to be
    1708  *                          reasonably valid, so we can do fewer checks than in
    1709  *                          gvmmR0ByVM.
    1710  *
    1711  * @note    Do not use this on pVM structures from userland!
    1712  */
    1713 GVMMR0DECL(PGVM) GVMMR0FastGetGVMByVM(PVMCC pVM)
    1714 {
    1715     AssertPtr(pVM);
    1716     Assert(!((uintptr_t)pVM & PAGE_OFFSET_MASK));
    1717 
    1718     PGVMM pGVMM;
    1719     GVMM_GET_VALID_INSTANCE(pGVMM, NULL);
    1720 
    1721     /*
    1722      * Validate.
    1723      */
    1724     uint16_t hGVM = pVM->hSelf;
    1725     ASMCompilerBarrier();
    1726     AssertReturn(hGVM != NIL_GVM_HANDLE && hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL);
    1727 
    1728     /*
    1729      * Look it up and check pVM against the value in the handle and GVM structures.
    1730      */
    1731     PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    1732     AssertReturn(pHandle->pVM == pVM, NULL);
    1733 
    1734     PGVM pGVM = pHandle->pGVM;
    1735     AssertPtrReturn(pGVM, NULL);
    1736     AssertReturn(pGVM == pVM, NULL);
    1737 
    1738     return pGVM;
    1739 }
    1740 
    1741 
    1742 /**
    1743  * Check that the given GVM and VM structures match up.
    1744  *
    1745  * The calling thread must be in the same process as the VM. All current lookups
    1746  * are by threads inside the same process, so this will not be an issue.
    1747  *
    1748  * @returns VBox status code.
    1749  * @param   pGVM            The global (ring-0) VM structure.
    1750  * @param   pVM             The cross context VM structure.
    1751  * @param   ppGVMM          Where to store the pointer to the GVMM instance data.
    1752  * @param   fTakeUsedLock   Whether to take the used lock or not.  We take it in
    1753  *                          shared mode when requested.
    1754  *
    1755  *                          Be very careful if not taking the lock as it's
    1756  *                          possible that the VM will disappear then!
    1757  *
    1758  * @remark  This will not assert on an invalid pVM but try return silently.
    1759  */
    1760 static int gvmmR0ByGVMandVM(PGVM pGVM, PVMCC pVM, PGVMM *ppGVMM, bool fTakeUsedLock)
     1628 * @remark  This will not assert on an invalid pGVM but try return silently.
     1629 */
     1630static int gvmmR0ByGVM(PGVM pGVM, PGVMM *ppGVMM, bool fTakeUsedLock)
    17611631{
    17621632    /*
     
    17641634     */
    17651635    int rc;
    1766     if (RT_LIKELY(RT_VALID_PTR(pGVM)))
    1767     {
    1768         if (RT_LIKELY(   RT_VALID_PTR(pVM)
    1769                       && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
    1770         {
    1771             if (RT_LIKELY(pGVM == pVM))
     1636    if (RT_LIKELY(   RT_VALID_PTR(pGVM)
     1637                  && ((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0 ))
     1638    {
     1639        /*
     1640         * Get the pGVMM instance and check the VM handle.
     1641         */
     1642        PGVMM pGVMM;
     1643        GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
     1644
     1645        uint16_t hGVM = pGVM->hSelf;
     1646        if (RT_LIKELY(   hGVM != NIL_GVM_HANDLE
     1647                      && hGVM < RT_ELEMENTS(pGVMM->aHandles)))
     1648        {
     1649            RTPROCESS const pidSelf = RTProcSelf();
     1650            PGVMHANDLE      pHandle = &pGVMM->aHandles[hGVM];
     1651            if (fTakeUsedLock)
     1652            {
     1653                rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
     1654                AssertRCReturn(rc, rc);
     1655            }
     1656
     1657            if (RT_LIKELY(   pHandle->pGVM   == pGVM
     1658                          && pHandle->ProcId == pidSelf
     1659                          && RT_VALID_PTR(pHandle->pvObj)))
    17721660            {
    17731661                /*
    1774                  * Get the pGVMM instance and check the VM handle.
     1662                 * Some more VM data consistency checks.
    17751663                 */
    1776                 PGVMM pGVMM;
    1777                 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
    1778 
    1779                 uint16_t hGVM = pGVM->hSelf;
    1780                 if (RT_LIKELY(   hGVM != NIL_GVM_HANDLE
    1781                               && hGVM < RT_ELEMENTS(pGVMM->aHandles)))
     1664                if (RT_LIKELY(   pGVM->cCpusUnsafe == pGVM->cCpus
     1665                              && pGVM->hSelfUnsafe == hGVM
     1666                              && pGVM->pSelf       == pGVM))
    17821667                {
    1783                     RTPROCESS const pidSelf = RTProcSelf();
    1784                     PGVMHANDLE      pHandle = &pGVMM->aHandles[hGVM];
    1785                     if (fTakeUsedLock)
     1668                    if (RT_LIKELY(   pGVM->enmVMState >= VMSTATE_CREATING
     1669                                  && pGVM->enmVMState <= VMSTATE_TERMINATED))
    17861670                    {
    1787                         rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
    1788                         AssertRCReturn(rc, rc);
     1671                        *ppGVMM = pGVMM;
     1672                        return VINF_SUCCESS;
    17891673                    }
    1790 
    1791                     if (RT_LIKELY(   pHandle->pGVM   == pGVM
    1792                                   && pHandle->pVM    == pVM
    1793                                   && pHandle->ProcId == pidSelf
    1794                                   && RT_VALID_PTR(pHandle->pvObj)))
    1795                     {
    1796                         /*
    1797                          * Some more VM data consistency checks.
    1798                          */
    1799                         if (RT_LIKELY(   pVM->cCpus == pGVM->cCpus
    1800                                       && pVM->hSelf == hGVM
    1801                                       && pVM->enmVMState >= VMSTATE_CREATING
    1802                                       && pVM->enmVMState <= VMSTATE_TERMINATED
    1803                                       && pVM->pSelf == pVM
    1804                                       ))
    1805                         {
    1806                             *ppGVMM = pGVMM;
    1807                             return VINF_SUCCESS;
    1808                         }
    1809                     }
    1810 
    1811                     if (fTakeUsedLock)
    1812                         GVMMR0_USED_SHARED_UNLOCK(pGVMM);
     1674                    rc = VERR_INCONSISTENT_VM_HANDLE;
    18131675                }
     1676                else
     1677                    rc = VERR_INCONSISTENT_VM_HANDLE;
    18141678            }
     1679            else
     1680                rc = VERR_INVALID_VM_HANDLE;
     1681
     1682            if (fTakeUsedLock)
     1683                GVMMR0_USED_SHARED_UNLOCK(pGVMM);
     1684        }
     1685        else
    18151686            rc = VERR_INVALID_VM_HANDLE;
    1816         }
    1817         else
    1818             rc = VERR_INVALID_POINTER;
    18191687    }
    18201688    else
     
    18251693
    18261694/**
     1695 * Validates a GVM/VM pair.
     1696 *
     1697 * @returns VBox status code.
     1698 * @param   pGVM        The global (ring-0) VM structure.
     1699 */
     1700GVMMR0DECL(int) GVMMR0ValidateGVM(PGVM pGVM)
     1701{
     1702    PGVMM pGVMM;
     1703    return gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
     1704}
     1705
     1706
     1707/**
    18271708 * Check that the given GVM and VM structures match up.
    18281709 *
     
    18321713 * @returns VBox status code.
    18331714 * @param   pGVM            The global (ring-0) VM structure.
    1834  * @param   pVM             The cross context VM structure.
    18351715 * @param   idCpu           The (alleged) Virtual CPU ID of the calling EMT.
    18361716 * @param   ppGVMM          Where to store the pointer to the GVMM instance data.
     
    18391719 * @remarks This will assert in all failure paths.
    18401720 */
    1841 static int gvmmR0ByGVMandVMandEMT(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, PGVMM *ppGVMM)
     1721static int gvmmR0ByGVMandEMT(PGVM pGVM, VMCPUID idCpu, PGVMM *ppGVMM)
    18421722{
    18431723    /*
     
    18451725     */
    18461726    AssertPtrReturn(pGVM, VERR_INVALID_POINTER);
    1847 
    1848     AssertPtrReturn(pVM,  VERR_INVALID_POINTER);
    1849     AssertReturn(((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0, VERR_INVALID_POINTER);
    1850     AssertReturn(pGVM == pVM, VERR_INVALID_VM_HANDLE);
     1727    AssertReturn(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0, VERR_INVALID_POINTER);
    18511728
    18521729    /*
     
    18641741    PGVMHANDLE      pHandle = &pGVMM->aHandles[hGVM];
    18651742    AssertReturn(   pHandle->pGVM   == pGVM
    1866                  && pHandle->pVM    == pVM
    18671743                 && pHandle->ProcId == pidSelf
    18681744                 && RT_VALID_PTR(pHandle->pvObj),
     
    18791755     * Some more VM data consistency checks.
    18801756     */
    1881     AssertReturn(pVM->cCpus == pGVM->cCpus, VERR_INCONSISTENT_VM_HANDLE);
    1882     AssertReturn(pVM->hSelf == hGVM, VERR_INCONSISTENT_VM_HANDLE);
    1883     AssertReturn(   pVM->enmVMState >= VMSTATE_CREATING
    1884                  && pVM->enmVMState <= VMSTATE_TERMINATED, VERR_INCONSISTENT_VM_HANDLE);
     1757    AssertReturn(pGVM->cCpusUnsafe == pGVM->cCpus, VERR_INCONSISTENT_VM_HANDLE);
     1758    AssertReturn(pGVM->hSelfUnsafe == hGVM, VERR_INCONSISTENT_VM_HANDLE);
     1759    AssertReturn(   pGVM->enmVMState >= VMSTATE_CREATING
     1760                 && pGVM->enmVMState <= VMSTATE_TERMINATED, VERR_INCONSISTENT_VM_HANDLE);
    18851761
    18861762    *ppGVMM = pGVMM;
     
    18901766
    18911767/**
    1892  * Validates a GVM/VM pair.
     1768 * Validates a GVM/EMT pair.
    18931769 *
    18941770 * @returns VBox status code.
    18951771 * @param   pGVM        The global (ring-0) VM structure.
    1896  * @param   pVM         The cross context VM structure.
    1897  */
    1898 GVMMR0DECL(int) GVMMR0ValidateGVMandVM(PGVM pGVM, PVMCC pVM)
    1899 {
    1900     PGVMM pGVMM;
    1901     return gvmmR0ByGVMandVM(pGVM, pVM, &pGVMM, false /*fTakeUsedLock*/);
    1902 }
    1903 
    1904 
    1905 
    1906 /**
    1907  * Validates a GVM/VM/EMT combo.
    1908  *
    1909  * @returns VBox status code.
    1910  * @param   pGVM        The global (ring-0) VM structure.
    1911  * @param   pVM         The cross context VM structure.
    19121772 * @param   idCpu       The Virtual CPU ID of the calling EMT.
    19131773 * @thread  EMT(idCpu)
    19141774 */
    1915 GVMMR0DECL(int) GVMMR0ValidateGVMandVMandEMT(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     1775GVMMR0DECL(int) GVMMR0ValidateGVMandEMT(PGVM pGVM, VMCPUID idCpu)
    19161776{
    19171777    PGVMM pGVMM;
    1918     return gvmmR0ByGVMandVMandEMT(pGVM, pVM, idCpu, &pGVMM);
     1778    return gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
    19191779}
    19201780
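The validation helpers above (gvmmR0ByGVM, gvmmR0ByGVMandEMT and their public wrappers) now derive everything from the single pGVM pointer: its self handle indexes the global handle table, and the entry must point back at the same structure for the calling process before the consistency checks pass. A compact, self-contained sketch of that handle-table round-trip check, with hypothetical names and types rather than the VirtualBox implementation:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_HANDLES 8
    #define VINF_SUCCESS            0
    #define VERR_INVALID_VM_HANDLE  (-1)

    /* Hypothetical VM object carrying its own handle (mirrors the idea of pGVM->hSelf). */
    typedef struct VMOBJ { uint16_t hSelf; uint32_t state; } VMOBJ;

    /* Hypothetical global handle table entry. */
    typedef struct HANDLEENTRY { VMOBJ *pObj; uint32_t procId; } HANDLEENTRY;

    static HANDLEENTRY g_aHandles[MAX_HANDLES];

    /* Validate a VM pointer purely from itself: index the table with its own
     * handle and require the entry to point back at it for the calling process. */
    static int validateByObject(VMOBJ *pObj, uint32_t procSelf)
    {
        if (!pObj)
            return VERR_INVALID_VM_HANDLE;
        uint16_t h = pObj->hSelf;
        if (h >= MAX_HANDLES)
            return VERR_INVALID_VM_HANDLE;
        if (g_aHandles[h].pObj != pObj || g_aHandles[h].procId != procSelf)
            return VERR_INVALID_VM_HANDLE;
        return VINF_SUCCESS;
    }

    int main(void)
    {
        VMOBJ vm = { 3, 1 };
        g_aHandles[3].pObj   = &vm;
        g_aHandles[3].procId = 42;

        printf("valid:   %d\n", validateByObject(&vm, 42));  /* VINF_SUCCESS */
        printf("badproc: %d\n", validateByObject(&vm, 7));   /* VERR_INVALID_VM_HANDLE */
        return 0;
    }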
     
    19381798     */
    19391799    PGVMM pGVMM = g_pGVMM;
    1940     if (    !VALID_PTR(pGVMM)
     1800    if (    !RT_VALID_PTR(pGVMM)
    19411801        ||  pGVMM->u32Magic != GVMM_MAGIC)
    19421802        return NULL;
     
    19541814        if (    pGVMM->aHandles[i].iSelf == i
    19551815            &&  pGVMM->aHandles[i].ProcId == ProcId
    1956             &&  VALID_PTR(pGVMM->aHandles[i].pvObj)
    1957             &&  VALID_PTR(pGVMM->aHandles[i].pVM)
    1958             &&  VALID_PTR(pGVMM->aHandles[i].pGVM))
     1816            &&  RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
     1817            &&  RT_VALID_PTR(pGVMM->aHandles[i].pGVM))
    19591818        {
    19601819            if (pGVMM->aHandles[i].hEMT0 == hEMT)
    1961                 return pGVMM->aHandles[i].pVM;
     1820                return pGVMM->aHandles[i].pGVM;
    19621821
     19631822            /* This is fairly safe with the current process per VM approach. */
     
    19701829            for (VMCPUID idCpu = 1; idCpu < cCpus; idCpu++)
    19711830                if (pGVM->aCpus[idCpu].hEMT == hEMT)
    1972                     return pGVMM->aHandles[i].pVM;
     1831                    return pGVMM->aHandles[i].pGVM;
    19731832        }
    19741833    }
     
    19951854     */
    19961855    PGVMM pGVMM = g_pGVMM;
    1997     if (   !VALID_PTR(pGVMM)
     1856    if (   !RT_VALID_PTR(pGVMM)
    19981857        || pGVMM->u32Magic != GVMM_MAGIC)
    19991858        return NULL;
     
    20111870        if (   pGVMM->aHandles[i].iSelf == i
    20121871            && pGVMM->aHandles[i].ProcId == ProcId
    2013             && VALID_PTR(pGVMM->aHandles[i].pvObj)
    2014             && VALID_PTR(pGVMM->aHandles[i].pVM)
    2015             && VALID_PTR(pGVMM->aHandles[i].pGVM))
     1872            && RT_VALID_PTR(pGVMM->aHandles[i].pvObj)
     1873            && RT_VALID_PTR(pGVMM->aHandles[i].pGVM))
    20161874        {
    20171875            PGVM pGVM = pGVMM->aHandles[i].pGVM;
     
    20821940    {
    20831941        PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
    2084         if (    VALID_PTR(pCurGVM)
     1942        if (    RT_VALID_PTR(pCurGVM)
    20851943            &&  pCurGVM->u32Magic == GVM_MAGIC)
    20861944        {
     
    21231981        {
    21241982            PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
    2125             if (    VALID_PTR(pCurGVM)
     1983            if (    RT_VALID_PTR(pCurGVM)
    21261984                &&  pCurGVM->u32Magic == GVM_MAGIC)
    21271985            {
     
    21532011        {
    21542012            PGVM pCurGVM = pGVMM->aHandles[i].pGVM;
    2155             if (    VALID_PTR(pCurGVM)
     2013            if (    RT_VALID_PTR(pCurGVM)
    21562014                &&  pCurGVM->u32Magic == GVM_MAGIC)
    21572015            {
     
    21922050 *          VERR_INTERRUPTED if a signal was scheduled for the thread.
    21932051 * @param   pGVM                The global (ring-0) VM structure.
    2194  * @param   pVM                 The cross context VM structure.
    21952052 * @param   pGVCpu              The global (ring-0) CPU structure of the calling
    21962053 *                              EMT.
     
    21982055 * @thread  EMT(pGVCpu).
    21992056 */
    2200 GVMMR0DECL(int) GVMMR0SchedHalt(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, uint64_t u64ExpireGipTime)
    2201 {
    2202     LogFlow(("GVMMR0SchedHalt: pGVM=%p pVM=%p pGVCpu=%p(%d) u64ExpireGipTime=%#RX64\n",
    2203              pGVM, pVM, pGVCpu, pGVCpu->idCpu, u64ExpireGipTime));
     2057GVMMR0DECL(int) GVMMR0SchedHalt(PGVM pGVM, PGVMCPU pGVCpu, uint64_t u64ExpireGipTime)
     2058{
     2059    LogFlow(("GVMMR0SchedHalt: pGVM=%p pGVCpu=%p(%d) u64ExpireGipTime=%#RX64\n",
     2060             pGVM, pGVCpu, pGVCpu->idCpu, u64ExpireGipTime));
    22042061    GVMM_CHECK_SMAP_SETUP();
    2205     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2062    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    22062063
    22072064    PGVMM pGVMM;
     
    22202077    {
    22212078        int rc2 = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc2);
    2222         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2079        GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    22232080    }
    22242081
     
    22312088    const uint64_t u64NowSys = RTTimeSystemNanoTS();
    22322089    const uint64_t u64NowGip = RTTimeNanoTS();
    2233     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2090    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    22342091
    22352092    if (fDoEarlyWakeUps)
    22362093    {
    22372094        pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
    2238         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2095        GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    22392096    }
    22402097
     
    22612118            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    22622119        }
    2263         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2120        GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    22642121
    22652122        rc = RTSemEventMultiWaitEx(pGVCpu->gvmm.s.HaltEventMulti,
    22662123                                   RTSEMWAIT_FLAGS_ABSOLUTE | RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_INTERRUPTIBLE,
    22672124                                   u64NowGip > u64NowSys ? u64ExpireGipTime : u64NowSys + cNsInterval);
    2268         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2125        GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    22692126
    22702127        ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
     
    22752132        {
    22762133            RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti);
    2277             GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2134            GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    22782135        }
    22792136        else if (rc == VERR_TIMEOUT)
     
    22882145        if (fDoEarlyWakeUps)
    22892146            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    2290         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2147        GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    22912148        RTSemEventMultiReset(pGVCpu->gvmm.s.HaltEventMulti);
    2292         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2149        GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    22932150        rc = VINF_SUCCESS;
    22942151    }
     
    23042161 *          VERR_INTERRUPTED if a signal was scheduled for the thread.
    23052162 * @param   pGVM                The global (ring-0) VM structure.
    2306  * @param   pVM                 The cross context VM structure.
    23072163 * @param   idCpu               The Virtual CPU ID of the calling EMT.
    23082164 * @param   u64ExpireGipTime    The time for the sleep to expire expressed as GIP time.
    23092165 * @thread  EMT(idCpu).
    23102166 */
    2311 GVMMR0DECL(int) GVMMR0SchedHaltReq(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t u64ExpireGipTime)
     2167GVMMR0DECL(int) GVMMR0SchedHaltReq(PGVM pGVM, VMCPUID idCpu, uint64_t u64ExpireGipTime)
    23122168{
    23132169    GVMM_CHECK_SMAP_SETUP();
    2314     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2170    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    23152171    PGVMM pGVMM;
    2316     int rc = gvmmR0ByGVMandVMandEMT(pGVM, pVM, idCpu, &pGVMM);
     2172    int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
    23172173    if (RT_SUCCESS(rc))
    23182174    {
    2319         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    2320         rc = GVMMR0SchedHalt(pGVM, pVM, &pGVM->aCpus[idCpu], u64ExpireGipTime);
    2321     }
    2322     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2175        GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
     2176        rc = GVMMR0SchedHalt(pGVM, &pGVM->aCpus[idCpu], u64ExpireGipTime);
     2177    }
     2178    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    23232179    return rc;
    23242180}
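
The halt path above reduces to an absolute-deadline wait (RTSEMWAIT_FLAGS_ABSOLUTE, GIP time) on the per-EMT halt event, which any other thread may signal early; with the duplicate pVM parameter gone, the request wrapper only needs the GVM handle and the CPU id. A rough user-mode analogue of that handshake, using plain POSIX primitives rather than the IPRT event semaphore (an illustrative sketch, not VirtualBox code), could look like:

    /* Hypothetical user-mode analogue of the halt/wake-up handshake: one thread
     * blocks on a per-vCPU event with an absolute deadline, another may signal it
     * early.  Plain POSIX primitives, not the IPRT RTSemEventMulti used above. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    typedef struct EMTHALT
    {
        pthread_mutex_t Mtx;
        pthread_cond_t  Cond;
        bool            fWoken;    /* stands in for the halt event being signalled */
    } EMTHALT;

    /* Wait until the absolute deadline or until another thread wakes us up. */
    static int emtHalt(EMTHALT *pThis, const struct timespec *pDeadline)
    {
        int rc = 0;
        pthread_mutex_lock(&pThis->Mtx);
        while (!pThis->fWoken && rc == 0)
            rc = pthread_cond_timedwait(&pThis->Cond, &pThis->Mtx, pDeadline);
        pThis->fWoken = false;     /* reset, like the RTSemEventMultiReset calls above */
        pthread_mutex_unlock(&pThis->Mtx);
        return rc;                 /* 0 = woken up, ETIMEDOUT = deadline reached */
    }

    /* The wake-up side: harmless if the target is not actually halted. */
    static void emtWakeUp(EMTHALT *pThis)
    {
        pthread_mutex_lock(&pThis->Mtx);
        pThis->fWoken = true;
        pthread_cond_signal(&pThis->Cond);
        pthread_mutex_unlock(&pThis->Mtx);
    }

    int main(void)
    {
        EMTHALT Halt = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false };
        struct timespec Deadline;
        clock_gettime(CLOCK_REALTIME, &Deadline);
        Deadline.tv_sec += 1;      /* halt for at most one second */
        emtWakeUp(&Halt);          /* pre-signalled: the wait returns immediately */
        return emtHalt(&Halt, &Deadline) == 0 ? 0 : 1;
    }
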
     
    23762232 *
    23772233 * @param   pGVM                The global (ring-0) VM structure.
    2378  * @param   pVM                 The cross context VM structure.
    23792234 * @param   idCpu               The Virtual CPU ID of the EMT to wake up.
    23802235 * @param   fTakeUsedLock       Take the used lock or not
    23812236 * @thread  Any but EMT(idCpu).
    23822237 */
    2383 GVMMR0DECL(int) GVMMR0SchedWakeUpEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, bool fTakeUsedLock)
     2238GVMMR0DECL(int) GVMMR0SchedWakeUpEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock)
    23842239{
    23852240    GVMM_CHECK_SMAP_SETUP();
    2386     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2241    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    23872242
    23882243    /*
     
    23902245     */
    23912246    PGVMM pGVMM;
    2392     int rc = gvmmR0ByGVMandVM(pGVM, pVM, &pGVMM, fTakeUsedLock);
    2393     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2247    int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock);
     2248    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    23942249    if (RT_SUCCESS(rc))
    23952250    {
     
    24002255             */
    24012256            rc = gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
    2402             GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2257            GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    24032258
    24042259            if (fTakeUsedLock && pGVMM->fDoEarlyWakeUps)
     
    24102265                const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
    24112266                pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
    2412                 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2267                GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    24132268            }
    24142269        }
     
    24202275            int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    24212276            AssertRC(rc2);
    2422             GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2277            GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    24232278        }
    24242279    }
     
    24372292 *
    24382293 * @param   pGVM                The global (ring-0) VM structure.
    2439  * @param   pVM                 The cross context VM structure.
    24402294 * @param   idCpu               The Virtual CPU ID of the EMT to wake up.
    24412295 * @thread  Any but EMT(idCpu).
    24422296 */
    2443 GVMMR0DECL(int) GVMMR0SchedWakeUp(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
    2444 {
    2445     return GVMMR0SchedWakeUpEx(pGVM, pVM, idCpu, true /* fTakeUsedLock */);
     2297GVMMR0DECL(int) GVMMR0SchedWakeUp(PGVM pGVM, VMCPUID idCpu)
     2298{
     2299    return GVMMR0SchedWakeUpEx(pGVM, idCpu, true /* fTakeUsedLock */);
    24462300}
    24472301
     
    24552309 * @retval  VINF_GVM_NOT_BLOCKED if the EMT wasn't blocked.
    24562310 *
    2457  * @param   pVM                 The cross context VM structure.
     2311 * @param   pGVM                The global (ring-0) VM structure.
    24582312 * @param   idCpu               The Virtual CPU ID of the EMT to wake up.
    24592313 * @thread  Any but EMT(idCpu).
    24602314 * @deprecated  Don't use in new code if possible!  Use the GVM variant.
    24612315 */
    2462 GVMMR0DECL(int) GVMMR0SchedWakeUpNoGVMNoLock(PVMCC pVM, VMCPUID idCpu)
     2316GVMMR0DECL(int) GVMMR0SchedWakeUpNoGVMNoLock(PGVM pGVM, VMCPUID idCpu)
    24632317{
    24642318    GVMM_CHECK_SMAP_SETUP();
    2465     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    2466     PGVM pGVM;
     2319    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    24672320    PGVMM pGVMM;
    2468     int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /*fTakeUsedLock*/);
    2469     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2321    int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
     2322    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    24702323    if (RT_SUCCESS(rc))
    2471         rc = GVMMR0SchedWakeUpEx(pGVM, pVM, idCpu, false /*fTakeUsedLock*/);
     2324        rc = GVMMR0SchedWakeUpEx(pGVM, idCpu, false /*fTakeUsedLock*/);
    24722325    return rc;
    24732326}
     
    25112364 *
    25122365 * @param   pGVM                The global (ring-0) VM structure.
    2513  * @param   pVM                 The cross context VM structure.
    25142366 * @param   idCpu               The ID of the virtual CPU to poke.
    25152367 * @param   fTakeUsedLock       Take the used lock or not
    25162368 */
    2517 GVMMR0DECL(int) GVMMR0SchedPokeEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, bool fTakeUsedLock)
     2369GVMMR0DECL(int) GVMMR0SchedPokeEx(PGVM pGVM, VMCPUID idCpu, bool fTakeUsedLock)
    25182370{
    25192371    /*
     
    25212373     */
    25222374    PGVMM pGVMM;
    2523     int rc = gvmmR0ByGVMandVM(pGVM, pVM, &pGVMM, fTakeUsedLock);
     2375    int rc = gvmmR0ByGVM(pGVM, &pGVMM, fTakeUsedLock);
    25242376    if (RT_SUCCESS(rc))
    25252377    {
     
    25492401 *
    25502402 * @param   pGVM                The global (ring-0) VM structure.
    2551  * @param   pVM                 The cross context VM structure.
    25522403 * @param   idCpu               The ID of the virtual CPU to poke.
    25532404 */
    2554 GVMMR0DECL(int) GVMMR0SchedPoke(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
    2555 {
    2556     return GVMMR0SchedPokeEx(pGVM, pVM, idCpu, true /* fTakeUsedLock */);
     2405GVMMR0DECL(int) GVMMR0SchedPoke(PGVM pGVM, VMCPUID idCpu)
     2406{
     2407    return GVMMR0SchedPokeEx(pGVM, idCpu, true /* fTakeUsedLock */);
    25572408}
    25582409
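
Where the wake-up path signals a halted EMT's event, GVMMR0SchedPoke targets an EMT that is busy in guest context (VINF_GVM_NOT_BUSY_IN_GC above) and has to force it out; the actual mechanism lives in gvmmR0SchedPokeOne, which is not part of this hunk. A hypothetical user-mode analogue that interrupts a busy thread with a signal, purely for illustration and not how the kernel-mode code does it:

    /* Hypothetical analogue of the poke: deliver a signal to force a specific
     * thread out of its blocking/"guest mode" state.  Illustration only. */
    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void pokeHandler(int iSignal)
    {
        (void)iSignal;                      /* the point is only to interrupt the target */
    }

    /* A thread standing in for an EMT that is busy "in guest mode". */
    static void *emtBusyLoop(void *pvUser)
    {
        (void)pvUser;
        pause();                            /* interrupted by the poke signal */
        printf("EMT: poked, back in the run loop\n");
        return NULL;
    }

    int main(void)
    {
        struct sigaction SigAct;
        memset(&SigAct, 0, sizeof(SigAct));
        SigAct.sa_handler = pokeHandler;    /* deliberately no SA_RESTART */
        sigemptyset(&SigAct.sa_mask);
        sigaction(SIGUSR1, &SigAct, NULL);

        pthread_t hEmt;
        pthread_create(&hEmt, NULL, emtBusyLoop, NULL);
        sleep(1);                           /* give the "EMT" time to get busy */
        pthread_kill(hEmt, SIGUSR1);        /* the poke: any thread but the target */
        pthread_join(hEmt, NULL);
        return 0;
    }
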
     
    25662417 * @retval  VINF_GVM_NOT_BUSY_IN_GC if the EMT wasn't busy in GC.
    25672418 *
    2568  * @param   pVM                 The cross context VM structure.
     2419 * @param   pGVM                The global (ring-0) VM structure.
    25692420 * @param   idCpu               The ID of the virtual CPU to poke.
    25702421 *
    25712422 * @deprecated  Don't use in new code if possible!  Use the GVM variant.
    25722423 */
    2573 GVMMR0DECL(int) GVMMR0SchedPokeNoGVMNoLock(PVMCC pVM, VMCPUID idCpu)
    2574 {
    2575     PGVM pGVM;
     2424GVMMR0DECL(int) GVMMR0SchedPokeNoGVMNoLock(PGVM pGVM, VMCPUID idCpu)
     2425{
    25762426    PGVMM pGVMM;
    2577     int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /*fTakeUsedLock*/);
     2427    int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
    25782428    if (RT_SUCCESS(rc))
    25792429    {
     
    25932443 *
    25942444 * @param   pGVM                The global (ring-0) VM structure.
    2595  * @param   pVM                 The cross context VM structure.
    25962445 * @param   pSleepSet           The set of sleepers to wake up.
    25972446 * @param   pPokeSet            The set of CPUs to poke.
    25982447 */
    2599 GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpus(PGVM pGVM, PVMCC pVM, PCVMCPUSET pSleepSet, PCVMCPUSET pPokeSet)
     2448GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpus(PGVM pGVM, PCVMCPUSET pSleepSet, PCVMCPUSET pPokeSet)
    26002449{
    26012450    AssertPtrReturn(pSleepSet, VERR_INVALID_POINTER);
    26022451    AssertPtrReturn(pPokeSet, VERR_INVALID_POINTER);
    26032452    GVMM_CHECK_SMAP_SETUP();
    2604     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2453    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    26052454    RTNATIVETHREAD hSelf = RTThreadNativeSelf();
    26062455
     
    26092458     */
    26102459    PGVMM pGVMM;
    2611     int rc = gvmmR0ByGVMandVM(pGVM, pVM, &pGVMM, true /* fTakeUsedLock */);
    2612     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2460    int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /* fTakeUsedLock */);
     2461    GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    26132462    if (RT_SUCCESS(rc))
    26142463    {
     
    26252474            {
    26262475                gvmmR0SchedWakeUpOne(pGVM, &pGVM->aCpus[idCpu]);
    2627                 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2476                GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    26282477            }
    26292478            else if (VMCPUSET_IS_PRESENT(pPokeSet, idCpu))
    26302479            {
    26312480                gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
    2632                 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2481                GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    26332482            }
    26342483        }
     
    26362485        int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    26372486        AssertRC(rc2);
    2638         GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2487        GVMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    26392488    }
    26402489
     
    26492498 * @returns see GVMMR0SchedWakeUpAndPokeCpus.
    26502499 * @param   pGVM            The global (ring-0) VM structure.
    2651  * @param   pVM             The cross context VM structure.
    26522500 * @param   pReq            Pointer to the request packet.
    26532501 */
    2654 GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpusReq(PGVM pGVM, PVMCC pVM, PGVMMSCHEDWAKEUPANDPOKECPUSREQ pReq)
     2502GVMMR0DECL(int) GVMMR0SchedWakeUpAndPokeCpusReq(PGVM pGVM, PGVMMSCHEDWAKEUPANDPOKECPUSREQ pReq)
    26552503{
    26562504    /*
     
    26602508    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
    26612509
    2662     return GVMMR0SchedWakeUpAndPokeCpus(pGVM, pVM, &pReq->SleepSet, &pReq->PokeSet);
     2510    return GVMMR0SchedWakeUpAndPokeCpus(pGVM, &pReq->SleepSet, &pReq->PokeSet);
    26632511}
    26642512
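
The request carries two CPU sets and ring-0 walks every vCPU once, waking the members of the sleep set and poking those of the poke set. A self-contained sketch of that dispatch, with a plain 64-bit mask standing in for VMCPUSET and invented names throughout:

    /* Hypothetical sketch of the wake-up-and-poke request handling: two CPU
     * sets come down from ring-3 and each vCPU is classified exactly once. */
    #include <stdint.h>
    #include <stdio.h>

    #define MY_CPUSET_IS_PRESENT(a_fSet, a_idCpu)  (((a_fSet) >> (a_idCpu)) & 1)

    typedef struct MYWAKEUPANDPOKEREQ
    {
        uint32_t cbReq;             /* size check, like pReq->Hdr.cbReq */
        uint64_t fSleepSet;         /* vCPUs believed to be halted */
        uint64_t fPokeSet;          /* vCPUs believed to be executing guest code */
    } MYWAKEUPANDPOKEREQ;

    static void handleWakeUpAndPoke(const MYWAKEUPANDPOKEREQ *pReq, uint32_t cCpus)
    {
        for (uint32_t idCpu = 0; idCpu < cCpus; idCpu++)
            if (MY_CPUSET_IS_PRESENT(pReq->fSleepSet, idCpu))
                printf("vCPU %u: wake up (signal halt event)\n", idCpu);
            else if (MY_CPUSET_IS_PRESENT(pReq->fPokeSet, idCpu))
                printf("vCPU %u: poke (force out of guest mode)\n", idCpu);
    }

    int main(void)
    {
        MYWAKEUPANDPOKEREQ Req = { sizeof(Req), 0x5 /* vCPUs 0,2 sleep */, 0x2 /* vCPU 1 poke */ };
        handleWakeUpAndPoke(&Req, 3);
        return 0;
    }
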
     
    26742522 *          VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
    26752523 * @param   pGVM            The global (ring-0) VM structure.
    2676  * @param   pVM             The cross context VM structure.
    26772524 * @param   idCpu           The Virtual CPU ID of the calling EMT.
    26782525 * @param   fYield          Whether to yield or not.
     
    26802527 * @thread  EMT(idCpu).
    26812528 */
    2682 GVMMR0DECL(int) GVMMR0SchedPoll(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, bool fYield)
     2529GVMMR0DECL(int) GVMMR0SchedPoll(PGVM pGVM, VMCPUID idCpu, bool fYield)
    26832530{
    26842531    /*
     
    26862533     */
    26872534    PGVMM pGVMM;
    2688     int rc = gvmmR0ByGVMandVMandEMT(pGVM, pVM, idCpu, &pGVMM);
     2535    int rc = gvmmR0ByGVMandEMT(pGVM, idCpu, &pGVMM);
    26892536    if (RT_SUCCESS(rc))
    26902537    {
     
    28092656 * The caller must check that the host can do high resolution timers.
    28102657 *
    2811  * @param   pVM         The cross context VM structure.
     2658 * @param   pGVM        The global (ring-0) VM structure.
    28122659 * @param   idHostCpu   The current host CPU id.
    28132660 * @param   uHz         The desired frequency.
    28142661 */
    2815 GVMMR0DECL(void) GVMMR0SchedUpdatePeriodicPreemptionTimer(PVMCC pVM, RTCPUID idHostCpu, uint32_t uHz)
    2816 {
    2817     NOREF(pVM);
     2662GVMMR0DECL(void) GVMMR0SchedUpdatePeriodicPreemptionTimer(PGVM pGVM, RTCPUID idHostCpu, uint32_t uHz)
     2663{
     2664    NOREF(pGVM);
    28182665#ifdef GVMM_SCHED_WITH_PPT
    28192666    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    28252672    uint32_t    iCpu  = RTMpCpuIdToSetIndex(idHostCpu);
    28262673    PGVMM       pGVMM = g_pGVMM;
    2827     if (   !VALID_PTR(pGVMM)
     2674    if (   !RT_VALID_PTR(pGVMM)
    28282675        || pGVMM->u32Magic != GVMM_MAGIC)
    28292676        return;
     
    28942741 * @param   pSession    The current session.
    28952742 * @param   pGVM        The GVM to obtain statistics for. Optional.
    2896  * @param   pVM         The VM structure corresponding to @a pGVM.
    2897  */
    2898 GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM, PVMCC pVM)
    2899 {
    2900     LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p pVM=%p\n", pStats, pSession, pGVM, pVM));
     2743 */
     2744GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
     2745{
     2746    LogFlow(("GVMMR0QueryStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM));
    29012747
    29022748    /*
     
    29132759    if (pGVM)
    29142760    {
    2915         int rc = gvmmR0ByGVMandVM(pGVM, pVM, &pGVMM, true /*fTakeUsedLock*/);
     2761        int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/);
    29162762        if (RT_FAILURE(rc))
    29172763            return rc;
     
    29402786        PGVM pOtherGVM = pGVMM->aHandles[i].pGVM;
    29412787        void *pvObj = pGVMM->aHandles[i].pvObj;
    2942         if (    VALID_PTR(pvObj)
    2943             &&  VALID_PTR(pOtherGVM)
     2788        if (    RT_VALID_PTR(pvObj)
     2789            &&  RT_VALID_PTR(pOtherGVM)
    29442790            &&  pOtherGVM->u32Magic == GVM_MAGIC
    29452791            &&  RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
     
    30072853 * @returns see GVMMR0QueryStatistics.
    30082854 * @param   pGVM            The global (ring-0) VM structure. Optional.
    3009  * @param   pVM             The cross context VM structure. Optional.
    30102855 * @param   pReq            Pointer to the request packet.
    30112856 * @param   pSession        The current session.
    30122857 */
    3013 GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PGVM pGVM, PVMCC pVM, PGVMMQUERYSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
     2858GVMMR0DECL(int) GVMMR0QueryStatisticsReq(PGVM pGVM, PGVMMQUERYSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
    30142859{
    30152860    /*
     
    30202865    AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER);
    30212866
    3022     return GVMMR0QueryStatistics(&pReq->Stats, pSession, pGVM, pVM);
     2867    return GVMMR0QueryStatistics(&pReq->Stats, pSession, pGVM);
    30232868}
    30242869
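
As with the other *Req entry points, the wrapper validates the ring-3 supplied packet (size and session) before handing the payload to the worker. A minimal sketch of that validation pattern, with invented type and status names:

    /* Hypothetical sketch of the request-packet checks done by the *Req wrappers
     * before forwarding to the worker.  Names and status codes are made up. */
    #include <stddef.h>
    #include <stdint.h>

    #define MY_VERR_INVALID_POINTER    (-6)
    #define MY_VERR_INVALID_PARAMETER  (-2)
    #define MY_VINF_SUCCESS            0

    typedef struct MYREQHDR { uint32_t cbReq; } MYREQHDR;
    typedef struct MYSTATSREQ
    {
        MYREQHDR    Hdr;
        void       *pSession;       /* must match the calling session */
        /* ... statistics payload ... */
    } MYSTATSREQ;

    int myQueryStatisticsReq(MYSTATSREQ *pReq, void *pSession)
    {
        if (!pReq)
            return MY_VERR_INVALID_POINTER;
        if (pReq->Hdr.cbReq != sizeof(*pReq))   /* reject truncated or overlong packets */
            return MY_VERR_INVALID_PARAMETER;
        if (pReq->pSession != pSession)         /* the request must belong to this session */
            return MY_VERR_INVALID_PARAMETER;
        /* ... forward the validated payload to the worker ... */
        return MY_VINF_SUCCESS;
    }
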
     
    30322877 * @param   pSession    The current session.
    30332878 * @param   pGVM        The GVM to reset statistics for. Optional.
    3034  * @param   pVM         The VM structure corresponding to @a pGVM.
    3035  */
    3036 GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM, PVMCC pVM)
    3037 {
    3038     LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pGVM=%p pVM=%p\n", pStats, pSession, pGVM, pVM));
     2879 */
     2880GVMMR0DECL(int) GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PGVM pGVM)
     2881{
     2882    LogFlow(("GVMMR0ResetStatistics: pStats=%p pSession=%p pGVM=%p\n", pStats, pSession, pGVM));
    30392883
    30402884    /*
     
    30502894    if (pGVM)
    30512895    {
    3052         int rc = gvmmR0ByGVMandVM(pGVM, pVM, &pGVMM, true /*fTakeUsedLock*/);
     2896        int rc = gvmmR0ByGVM(pGVM, &pGVMM, true /*fTakeUsedLock*/);
    30532897        if (RT_FAILURE(rc))
    30542898            return rc;
     
    30892933            PGVM pOtherGVM = pGVMM->aHandles[i].pGVM;
    30902934            void *pvObj = pGVMM->aHandles[i].pvObj;
    3091             if (    VALID_PTR(pvObj)
    3092                 &&  VALID_PTR(pOtherGVM)
     2935            if (    RT_VALID_PTR(pvObj)
     2936                &&  RT_VALID_PTR(pOtherGVM)
    30932937                &&  pOtherGVM->u32Magic == GVM_MAGIC
    30942938                &&  RT_SUCCESS(SUPR0ObjVerifyAccess(pvObj, pSession, NULL)))
     
    31252969 * @returns see GVMMR0ResetStatistics.
    31262970 * @param   pGVM            The global (ring-0) VM structure. Optional.
    3127  * @param   pVM             The cross context VM structure. Optional.
    31282971 * @param   pReq            Pointer to the request packet.
    31292972 * @param   pSession        The current session.
    31302973 */
    3131 GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PGVM pGVM, PVMCC pVM, PGVMMRESETSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
     2974GVMMR0DECL(int) GVMMR0ResetStatisticsReq(PGVM pGVM, PGVMMRESETSTATISTICSSREQ pReq, PSUPDRVSESSION pSession)
    31322975{
    31332976    /*
     
    31382981    AssertReturn(pReq->pSession == pSession, VERR_INVALID_PARAMETER);
    31392982
    3140     return GVMMR0ResetStatistics(&pReq->Stats, pSession, pGVM, pVM);
    3141 }
    3142 
     2983    return GVMMR0ResetStatistics(&pReq->Stats, pSession, pGVM);
     2984}
     2985
  • trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp

    r80334 r80346  
    8080*   Internal Functions                                                                                                           *
    8181*********************************************************************************************************************************/
    82 NEM_TMPL_STATIC int  nemR0WinMapPages(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
     82NEM_TMPL_STATIC int  nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
    8383                                      uint32_t cPages, uint32_t fFlags);
    8484NEM_TMPL_STATIC int  nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
     
    8989NEM_TMPL_STATIC int  nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
    9090#endif
    91 DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PVMCPUCC pVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
     91DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
    9292                                             void *pvOutput, uint32_t cbOutput);
    9393
     
    156156 * @returns VBox status code.
    157157 * @param   pGVM            The ring-0 VM handle.
    158  * @param   pVM             The cross context VM handle.
    159158 * @thread  EMT(0)
    160159 */
    161 VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVMCC pVM)
     160VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM)
    162161{
    163162    AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding));
    164163    AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding));
    165164
    166     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
     165    int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
    167166    AssertRCReturn(rc, rc);
    168167
     
    228227    }
    229228
    230     RT_NOREF(pVM);
    231229    return rc;
    232230}
     
    238236 * @returns NT status code.
    239237 * @param   pGVM            The ring-0 VM structure.
    240  * @param   pVCpu           The cross context CPU structure of the calling EMT.
     238 * @param   pGVCpu          The global (ring-0) CPU structure of the calling EMT.
    241239 * @param   uFunction       The function to perform.
    242240 * @param   pvInput         The input buffer.  This must point within the VM
     
    249247 * @param   cbOutput        The size of the output.  @a pvOutput must be NULL
    250248 *                          when zero.
    251  * @thread  EMT(pVCpu)
     249 * @thread  EMT(pGVCpu)
    252250 */
    253 DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PVMCPUCC pVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
     251DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
    254252                                             void *pvOutput, uint32_t cbOutput)
    255253{
     
    258256     * Input and output parameters are part of the VM CPU structure.
    259257     */
    260     VMCPU_ASSERT_EMT(pVCpu);
     258    VMCPU_ASSERT_EMT(pGVCpu);
    261259    if (pvInput)
    262         AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVCpu <= sizeof(*pVCpu), VERR_INVALID_PARAMETER);
     260        AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
    263261    if (pvOutput)
    264         AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVCpu <= sizeof(*pVCpu), VERR_INVALID_PARAMETER);
     262        AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
    265263#endif
    266264
     
    268266    int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,
    269267                               pvInput,
    270                                pvInput  ? (uintptr_t)pvInput  + pVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
     268                               pvInput  ? (uintptr_t)pvInput  + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
    271269                               cbInput,
    272270                               pvOutput,
    273                                pvOutput ? (uintptr_t)pvOutput + pVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
     271                               pvOutput ? (uintptr_t)pvOutput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
    274272                               cbOutput,
    275273                               &rcNt);
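
The ring-0 and ring-3 addresses of the I/O control buffers differ only by a fixed per-vCPU delta, and the assertions above ensure the buffers lie entirely within the vCPU structure, so the same offset is valid in both mappings. A small standalone illustration of that conversion (hypothetical types, not the real NEM layout):

    /* Hypothetical sketch of the pointer conversion: a buffer inside the
     * (dual-mapped) vCPU structure gets its ring-3 address by adding a
     * precomputed delta between the two mappings.  Illustration only. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct MYVCPU
    {
        uintptr_t offRing3ConversionDelta;  /* ring-3 base minus ring-0 base */
        uint8_t   abIoCtlBuf[64];           /* in/out buffer inside the structure */
    } MYVCPU;

    static uintptr_t myRing0ToRing3(const MYVCPU *pVCpu, const void *pvRing0, size_t cb)
    {
        /* The buffer must be fully contained in the vCPU structure ... */
        assert((uintptr_t)pvRing0 + cb - (uintptr_t)pVCpu <= sizeof(*pVCpu));
        /* ... so the same offset is valid in the ring-3 mapping as well. */
        return (uintptr_t)pvRing0 + pVCpu->offRing3ConversionDelta;
    }

    int main(void)
    {
        MYVCPU VCpu;
        VCpu.offRing3ConversionDelta = 0x10000;     /* made-up delta for the demo */
        uintptr_t uR3 = myRing0ToRing3(&VCpu, VCpu.abIoCtlBuf, sizeof(VCpu.abIoCtlBuf));
        printf("ring-0 %p -> ring-3 %#lx\n", (void *)VCpu.abIoCtlBuf, (unsigned long)uR3);
        return 0;
    }
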
     
    285283 * @returns VBox status code.
    286284 * @param   pGVM            The ring-0 VM handle.
    287  * @param   pVM             The cross context VM handle.
    288285 * @thread  EMT(0)
    289286 */
    290 VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVMCC pVM)
     287VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM)
    291288{
    292     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
     289    int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
    293290    AssertRCReturn(rc, rc);
    294291    SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
     
    298295     * Copy and validate the I/O control information from ring-3.
    299296     */
    300     NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
     297    NEMWINIOCTL Copy = pGVM->nem.s.IoCtlGetHvPartitionId;
    301298    AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
    302299    AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
     
    304301    pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;
    305302
    306     pGVM->nemr0.s.fMayUseRing0Runloop = pVM->nem.s.fUseRing0Runloop;
    307 
    308     Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
     303    pGVM->nemr0.s.fMayUseRing0Runloop = pGVM->nem.s.fUseRing0Runloop;
     304
     305    Copy = pGVM->nem.s.IoCtlStartVirtualProcessor;
    309306    AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
    310307    AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
     
    314311        pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;
    315312
    316     Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
     313    Copy = pGVM->nem.s.IoCtlStopVirtualProcessor;
    317314    AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
    318315    AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
     
    323320        pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;
    324321
    325     Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
     322    Copy = pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
    326323    AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
    327324    AssertLogRelStmt(   Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
     
    336333
    337334    if (   RT_SUCCESS(rc)
    338         || !pVM->nem.s.fUseRing0Runloop)
     335        || !pGVM->nem.s.fUseRing0Runloop)
    339336    {
    340337        /*
    341338         * Setup of an I/O control context for the partition handle for later use.
    342339         */
    343         rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
     340        rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pGVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
    344341        AssertLogRelRCReturn(rc, rc);
    345342        for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
     
    357354        AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
    358355        pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;
    359         AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pVM->nem.s.idHvPartition,
    360                               ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pVM->nem.s.idHvPartition),
     356        AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pGVM->nem.s.idHvPartition,
     357                              ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition),
    361358                              VERR_NEM_INIT_FAILED);
    362359    }
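
Note the copy-then-validate pattern above: the I/O control descriptors live in memory that ring-3 can modify, so they are snapshotted into the ring-0-only nemr0 area first and only the snapshot is checked and cached, so a later change from ring-3 cannot bypass the checks. A minimal sketch of the idea with invented names:

    /* Hypothetical sketch of copy-then-validate: snapshot ring-3 writable data
     * into a ring-0-only copy, validate the copy, cache the copy. */
    #include <stdint.h>

    #define MY_VERR_NEM_INIT_FAILED  (-100)
    #define MY_VINF_SUCCESS          0

    typedef struct MYIOCTLINFO
    {
        uint32_t uFunction;
        uint32_t cbInput;
        uint32_t cbOutput;
    } MYIOCTLINFO;

    typedef struct MYSHARED { MYIOCTLINFO IoCtlStartVCpu; } MYSHARED;   /* ring-3 writable */
    typedef struct MYRING0  { MYIOCTLINFO IoCtlStartVCpu; } MYRING0;    /* ring-0 only    */

    int myCacheIoCtlInfo(const MYSHARED *pShared, MYRING0 *pR0)
    {
        MYIOCTLINFO Copy = pShared->IoCtlStartVCpu;     /* snapshot first ... */
        if (Copy.uFunction == 0)                        /* ... then validate the snapshot */
            return MY_VERR_NEM_INIT_FAILED;
        if (Copy.cbInput != sizeof(uint32_t))           /* e.g. an HV_VP_INDEX-sized input */
            return MY_VERR_NEM_INIT_FAILED;
        pR0->IoCtlStartVCpu = Copy;                     /* cache only the validated copy */
        return MY_VINF_SUCCESS;
    }
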
     
    424421 * Worker for NEMR0MapPages and others.
    425422 */
    426 NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
     423NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
    427424                                     uint32_t cPages, uint32_t fFlags)
    428425{
     
    458455        {
    459456            RTHCPHYS HCPhys = NIL_RTGCPHYS;
    460             int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
     457            int rc = PGMPhysGCPhys2HCPhys(pGVM, GCPhysSrc, &HCPhys);
    461458            AssertRCReturn(rc, rc);
    462459            pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
     
    501498 * @returns VBox status code.
    502499 * @param   pGVM            The ring-0 VM handle.
    503  * @param   pVM             The cross context VM handle.
    504500 * @param   idCpu           The calling EMT.  Necessary for getting the
    505501 *                          hypercall page and arguments.
    506502 * @thread  EMT(idCpu)
    507503 */
    508 VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     504VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, VMCPUID idCpu)
    509505{
    510506    /*
    511507     * Unpack the call.
    512508     */
    513     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     509    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    514510    if (RT_SUCCESS(rc))
    515511    {
    516         PVMCPUCC  pVCpu  = VMCC_GET_CPU(pVM, idCpu);
    517512        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
    518513
    519         RTGCPHYS const          GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
    520         RTGCPHYS const          GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
    521         uint32_t const          cPages    = pVCpu->nem.s.Hypercall.MapPages.cPages;
    522         HV_MAP_GPA_FLAGS const  fFlags    = pVCpu->nem.s.Hypercall.MapPages.fFlags;
     514        RTGCPHYS const          GCPhysSrc = pGVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
     515        RTGCPHYS const          GCPhysDst = pGVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
     516        uint32_t const          cPages    = pGVCpu->nem.s.Hypercall.MapPages.cPages;
     517        HV_MAP_GPA_FLAGS const  fFlags    = pGVCpu->nem.s.Hypercall.MapPages.fFlags;
    523518
    524519        /*
    525520         * Do the work.
    526521         */
    527         rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
     522        rc = nemR0WinMapPages(pGVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
    528523    }
    529524    return rc;
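
The ring-0 entry point takes only the VM handle and the vCPU id; the actual hypercall arguments are picked up from the calling EMT's per-vCPU Hypercall union, which ring-3 filled in before the call. A hypothetical, self-contained illustration of that convention (simplified types, not the real structures):

    /* Hypothetical illustration of the argument convention: ring-3 fills a
     * per-vCPU union with the hypercall parameters and the ring-0 entry point,
     * given only the vCPU id, unpacks them from there. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct MYVCPUHYPERCALL
    {
        union
        {
            struct { uint64_t GCPhysSrc, GCPhysDst; uint32_t cPages, fFlags; } MapPages;
            struct { uint64_t GCPhys; uint32_t cPages; } UnmapPages;
        } u;
    } MYVCPUHYPERCALL;

    static int myR0MapPages(MYVCPUHYPERCALL *paCpus, uint32_t idCpu)
    {
        /* Unpack the call from the calling EMT's per-vCPU area. */
        uint64_t const GCPhysSrc = paCpus[idCpu].u.MapPages.GCPhysSrc;
        uint64_t const GCPhysDst = paCpus[idCpu].u.MapPages.GCPhysDst;
        uint32_t const cPages    = paCpus[idCpu].u.MapPages.cPages;
        printf("map %u page(s): %#llx -> %#llx\n", cPages,
               (unsigned long long)GCPhysSrc, (unsigned long long)GCPhysDst);
        return 0;
    }

    int main(void)
    {
        MYVCPUHYPERCALL aCpus[2];
        memset(aCpus, 0, sizeof(aCpus));
        aCpus[1].u.MapPages.GCPhysSrc = 0x1000;   /* the ring-3 side packs the arguments... */
        aCpus[1].u.MapPages.GCPhysDst = 0x2000;
        aCpus[1].u.MapPages.cPages    = 1;
        return myR0MapPages(aCpus, 1);            /* ...and ring-0 unpacks them. */
    }
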
     
    581576 * @returns VBox status code.
    582577 * @param   pGVM            The ring-0 VM handle.
    583  * @param   pVM             The cross context VM handle.
    584578 * @param   idCpu           The calling EMT.  Necessary for getting the
    585579 *                          hypercall page and arguments.
    586580 * @thread  EMT(idCpu)
    587581 */
    588 VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     582VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, VMCPUID idCpu)
    589583{
    590584    /*
    591585     * Unpack the call.
    592586     */
    593     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     587    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    594588    if (RT_SUCCESS(rc))
    595589    {
    596         PVMCPUCC  pVCpu  = VMCC_GET_CPU(pVM, idCpu);
    597590        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
    598591
    599         RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
    600         uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
     592        RTGCPHYS const GCPhys = pGVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
     593        uint32_t const cPages = pGVCpu->nem.s.Hypercall.UnmapPages.cPages;
    601594
    602595        /*
     
    622615NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
    623616{
    624     PVMCPUCC                   pVCpu  = pGVCpu;
    625617    HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
    626618    AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
     
    633625    uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    634626    if (   !fWhat
    635         && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
     627        && pGVCpu->nem.s.fCurrentInterruptWindows == pGVCpu->nem.s.fDesiredInterruptWindows)
    636628        return VINF_SUCCESS;
    637629    uintptr_t iReg = 0;
     
    867859        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    868860        pInput->Elements[iReg].Name                 = HvX64RegisterCr8;
    869         pInput->Elements[iReg].Value.Reg64          = CPUMGetGuestCR8(pVCpu);
     861        pInput->Elements[iReg].Value.Reg64          = CPUMGetGuestCR8(pGVCpu);
    870862        iReg++;
    871863    }
     
    879871        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    880872        pInput->Elements[iReg].Name                 = HvX64RegisterDr0;
    881         //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR0(pVCpu);
     873        //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR0(pGVCpu);
    882874        pInput->Elements[iReg].Value.Reg64          = pCtx->dr[0];
    883875        iReg++;
    884876        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    885877        pInput->Elements[iReg].Name                 = HvX64RegisterDr1;
    886         //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR1(pVCpu);
     878        //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR1(pGVCpu);
    887879        pInput->Elements[iReg].Value.Reg64          = pCtx->dr[1];
    888880        iReg++;
    889881        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    890882        pInput->Elements[iReg].Name                 = HvX64RegisterDr2;
    891         //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR2(pVCpu);
     883        //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR2(pGVCpu);
    892884        pInput->Elements[iReg].Value.Reg64          = pCtx->dr[2];
    893885        iReg++;
    894886        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    895887        pInput->Elements[iReg].Name                 = HvX64RegisterDr3;
    896         //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR3(pVCpu);
     888        //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR3(pGVCpu);
    897889        pInput->Elements[iReg].Value.Reg64          = pCtx->dr[3];
    898890        iReg++;
     
    902894        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    903895        pInput->Elements[iReg].Name                 = HvX64RegisterDr6;
    904         //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR6(pVCpu);
     896        //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR6(pGVCpu);
    905897        pInput->Elements[iReg].Value.Reg64          = pCtx->dr[6];
    906898        iReg++;
     
    910902        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    911903        pInput->Elements[iReg].Name                 = HvX64RegisterDr7;
    912         //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR7(pVCpu);
     904        //pInput->Elements[iReg].Value.Reg64        = CPUMGetHyperDR7(pGVCpu);
    913905        pInput->Elements[iReg].Value.Reg64          = pCtx->dr[7];
    914906        iReg++;
     
    11201112        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    11211113        pInput->Elements[iReg].Name                 = HvX64RegisterApicBase;
    1122         pInput->Elements[iReg].Value.Reg64          = APICGetBaseMsrNoCheck(pVCpu);
     1114        pInput->Elements[iReg].Value.Reg64          = APICGetBaseMsrNoCheck(pGVCpu);
    11231115        iReg++;
    11241116        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
     
    11291121        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    11301122        pInput->Elements[iReg].Name                 = HvX64RegisterMtrrCap;
    1131         pInput->Elements[iReg].Value.Reg64          = CPUMGetGuestIa32MtrrCap(pVCpu);
     1123        pInput->Elements[iReg].Value.Reg64          = CPUMGetGuestIa32MtrrCap(pGVCpu);
    11321124        iReg++;
    11331125# endif
    11341126
    1135         PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
     1127        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
    11361128
    11371129        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
     
    11921184
    11931185# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
    1194         const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
     1186        const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
    11951187        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
    11961188        {
     
    12011193            HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    12021194            pInput->Elements[iReg].Name                 = HvX64RegisterIa32FeatureControl;
    1203             pInput->Elements[iReg].Value.Reg64          = CPUMGetGuestIa32FeatureControl(pVCpu);
     1195            pInput->Elements[iReg].Value.Reg64          = CPUMGetGuestIa32FeatureControl(pGVCpu);
    12041196            iReg++;
    12051197        }
     
    12241216        pInput->Elements[iReg].Name                 = HvRegisterInterruptState;
    12251217        pInput->Elements[iReg].Value.Reg64          = 0;
    1226         if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    1227             && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
     1218        if (   VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     1219            && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
    12281220            pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
    1229         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     1221        if (VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
    12301222            pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
    12311223        iReg++;
     
    12331225    else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
    12341226    {
    1235         if (   pVCpu->nem.s.fLastInterruptShadow
    1236             || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    1237                 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
     1227        if (   pGVCpu->nem.s.fLastInterruptShadow
     1228            || (   VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     1229                && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip))
    12381230        {
    12391231            HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    12401232            pInput->Elements[iReg].Name                 = HvRegisterInterruptState;
    12411233            pInput->Elements[iReg].Value.Reg64          = 0;
    1242             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    1243                 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
     1234            if (   VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     1235                && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
    12441236                pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
    12451237            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
    1246             //if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     1238            //if (VMCPU_FF_IS_ANY_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
    12471239            //    pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
    12481240            iReg++;
     
    12531245
    12541246    /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
    1255     uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
     1247    uint8_t const fDesiredIntWin = pGVCpu->nem.s.fDesiredInterruptWindows;
    12561248    if (   fDesiredIntWin
    1257         || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    1258     {
    1259         pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
     1249        || pGVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
     1250    {
     1251        pGVCpu->nem.s.fCurrentInterruptWindows = pGVCpu->nem.s.fDesiredInterruptWindows;
    12601252        HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
    12611253        pInput->Elements[iReg].Name                                         = HvX64RegisterDeliverabilityNotifications;
     
    12961288 * @returns VBox status code
    12971289 * @param   pGVM        The ring-0 VM handle.
    1298  * @param   pVM         The cross context VM handle.
    12991290 * @param   idCpu       The calling EMT.  Necessary for getting the
    13001291 *                      hypercall page and arguments.
    13011292 */
    1302 VMMR0_INT_DECL(int)  NEMR0ExportState(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     1293VMMR0_INT_DECL(int)  NEMR0ExportState(PGVM pGVM, VMCPUID idCpu)
    13031294{
    13041295#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
     
    13061297     * Validate the call.
    13071298     */
    1308     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     1299    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    13091300    if (RT_SUCCESS(rc))
    13101301    {
    1311         PVMCPUCC  pVCpu  = VMCC_GET_CPU(pVM, idCpu);
    13121302        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
    13131303        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
     
    13161306         * Call worker.
    13171307         */
    1318         rc = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
     1308        rc = nemR0WinExportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx);
    13191309    }
    13201310    return rc;
    13211311#else
    1322     RT_NOREF(pGVM, pVM, idCpu);
     1312    RT_NOREF(pGVM, idCpu);
    13231313    return VERR_NOT_IMPLEMENTED;
    13241314#endif
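
The export worker batches everything into a single HV_INPUT_SET_VP_REGISTERS block: each register that the fWhat mask marks as needing export is appended as a name/value element, iReg counts the batch, and the whole block is then handed to the hypervisor at once. A simplified standalone sketch of that accumulation pattern (invented names and register ids, not the HV_* layout):

    /* Hypothetical sketch of the register-batching pattern: append only the
     * dirty registers to one name/value array and submit the batch in one go. */
    #include <stdint.h>
    #include <stdio.h>

    #define MY_EXTRN_RIP  UINT64_C(0x1)
    #define MY_EXTRN_RSP  UINT64_C(0x2)
    #define MY_EXTRN_CR0  UINT64_C(0x4)

    typedef struct MYREGELEMENT { uint32_t Name; uint64_t Reg64; } MYREGELEMENT;

    static uint32_t myBuildBatch(MYREGELEMENT *paElements, uint64_t fWhat,
                                 uint64_t rip, uint64_t rsp, uint64_t cr0)
    {
        uint32_t iReg = 0;
        if (fWhat & MY_EXTRN_RIP) { paElements[iReg].Name = 1; paElements[iReg].Reg64 = rip; iReg++; }
        if (fWhat & MY_EXTRN_RSP) { paElements[iReg].Name = 2; paElements[iReg].Reg64 = rsp; iReg++; }
        if (fWhat & MY_EXTRN_CR0) { paElements[iReg].Name = 3; paElements[iReg].Reg64 = cr0; iReg++; }
        return iReg;    /* number of elements for the single batched set-registers call */
    }

    int main(void)
    {
        MYREGELEMENT aElements[3];
        uint32_t cRegs = myBuildBatch(aElements, MY_EXTRN_RIP | MY_EXTRN_CR0,
                                      0x1000, 0x8000, 0x80000011);
        printf("exporting %u register(s) in one batch\n", cRegs);
        return 0;
    }
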
     
    15761566     * Copy information to the CPUM context.
    15771567     */
    1578     PVMCPUCC pVCpu = pGVCpu;
    15791568    iReg = 0;
    15801569
     
    17451734            if (pCtx->cr0 != paValues[iReg].Reg64)
    17461735            {
    1747                 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
     1736                CPUMSetGuestCR0(pGVCpu, paValues[iReg].Reg64);
    17481737                fMaybeChangedMode = true;
    17491738            }
     
    17611750            if (pCtx->cr3 != paValues[iReg].Reg64)
    17621751            {
    1763                 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
     1752                CPUMSetGuestCR3(pGVCpu, paValues[iReg].Reg64);
    17641753                fUpdateCr3 = true;
    17651754            }
     
    17711760            if (pCtx->cr4 != paValues[iReg].Reg64)
    17721761            {
    1773                 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
     1762                CPUMSetGuestCR4(pGVCpu, paValues[iReg].Reg64);
    17741763                fMaybeChangedMode = true;
    17751764            }
     
    17801769    {
    17811770        Assert(pInput->Names[iReg] == HvX64RegisterCr8);
    1782         APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
     1771        APICSetTpr(pGVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
    17831772        iReg++;
    17841773    }
     
    17891778        Assert(pInput->Names[iReg] == HvX64RegisterDr7);
    17901779        if (pCtx->dr[7] != paValues[iReg].Reg64)
    1791             CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
     1780            CPUMSetGuestDR7(pGVCpu, paValues[iReg].Reg64);
    17921781        pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
    17931782        iReg++;
     
    17981787        Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
    17991788        if (pCtx->dr[0] != paValues[iReg].Reg64)
    1800             CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
     1789            CPUMSetGuestDR0(pGVCpu, paValues[iReg].Reg64);
    18011790        iReg++;
    18021791        if (pCtx->dr[1] != paValues[iReg].Reg64)
    1803             CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
     1792            CPUMSetGuestDR1(pGVCpu, paValues[iReg].Reg64);
    18041793        iReg++;
    18051794        if (pCtx->dr[2] != paValues[iReg].Reg64)
    1806             CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
     1795            CPUMSetGuestDR2(pGVCpu, paValues[iReg].Reg64);
    18071796        iReg++;
    18081797        if (pCtx->dr[3] != paValues[iReg].Reg64)
    1809             CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
     1798            CPUMSetGuestDR3(pGVCpu, paValues[iReg].Reg64);
    18101799        iReg++;
    18111800    }
     
    18141803        Assert(pInput->Names[iReg] == HvX64RegisterDr6);
    18151804        if (pCtx->dr[6] != paValues[iReg].Reg64)
    1816             CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
     1805            CPUMSetGuestDR6(pGVCpu, paValues[iReg].Reg64);
    18171806        iReg++;
    18181807    }
     
    19371926        if (paValues[iReg].Reg64 != pCtx->msrEFER)
    19381927        {
    1939             Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
     1928            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
    19401929            if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
    1941                 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
     1930                PGMNotifyNxeChanged(pGVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
    19421931            pCtx->msrEFER = paValues[iReg].Reg64;
    19431932            fMaybeChangedMode = true;
     
    19491938        Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
    19501939        if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
    1951             Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
     1940            Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
    19521941        pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
    19531942        iReg++;
     
    19571946        Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
    19581947        if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
    1959             Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
     1948            Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
    19601949        pCtx->SysEnter.cs = paValues[iReg].Reg64;
    19611950        iReg++;
     
    19631952        Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
    19641953        if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
    1965             Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
     1954            Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
    19661955        pCtx->SysEnter.eip = paValues[iReg].Reg64;
    19671956        iReg++;
     
    19691958        Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
    19701959        if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
    1971             Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
     1960            Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
    19721961        pCtx->SysEnter.esp = paValues[iReg].Reg64;
    19731962        iReg++;
     
    19771966        Assert(pInput->Names[iReg] == HvX64RegisterStar);
    19781967        if (pCtx->msrSTAR != paValues[iReg].Reg64)
    1979             Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
     1968            Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
    19801969        pCtx->msrSTAR   = paValues[iReg].Reg64;
    19811970        iReg++;
     
    19831972        Assert(pInput->Names[iReg] == HvX64RegisterLstar);
    19841973        if (pCtx->msrLSTAR != paValues[iReg].Reg64)
    1985             Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
     1974            Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
    19861975        pCtx->msrLSTAR  = paValues[iReg].Reg64;
    19871976        iReg++;
     
    19891978        Assert(pInput->Names[iReg] == HvX64RegisterCstar);
    19901979        if (pCtx->msrCSTAR != paValues[iReg].Reg64)
    1991             Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
     1980            Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
    19921981        pCtx->msrCSTAR  = paValues[iReg].Reg64;
    19931982        iReg++;
     
    19951984        Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
    19961985        if (pCtx->msrSFMASK != paValues[iReg].Reg64)
    1997             Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
     1986            Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
    19981987        pCtx->msrSFMASK = paValues[iReg].Reg64;
    19991988        iReg++;
     
    20021991    {
    20031992        Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
    2004         const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
     1993        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pGVCpu);
    20051994        if (paValues[iReg].Reg64 != uOldBase)
    20061995        {
    20071996            Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
    2008                   pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
    2009             int rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
     1997                  pGVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
     1998            int rc2 = APICSetBaseMsr(pGVCpu, paValues[iReg].Reg64);
    20101999            AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
    20112000        }
     
    20142003        Assert(pInput->Names[iReg] == HvX64RegisterPat);
    20152004        if (pCtx->msrPAT != paValues[iReg].Reg64)
    2016             Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
     2005            Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
    20172006        pCtx->msrPAT    = paValues[iReg].Reg64;
    20182007        iReg++;
     
    20202009# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
    20212010        Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
    2022         if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
    2023             Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
     2011        if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pGVCpu))
     2012            Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pGVCpu), paValues[iReg].Reg64));
    20242013        iReg++;
    20252014# endif
    20262015
    2027         PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
     2016        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
    20282017        Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
    20292018        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
    2030             Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
     2019            Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
    20312020        pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
    20322021        iReg++;
     
    20362025        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
    20372026        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
    2038             Log7(("NEM/%u: MSR MTRR_FIX16K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
     2027            Log7(("NEM/%u: MSR MTRR_FIX16K_00000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
    20392028        pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
    20402029        iReg++;
     
    20422031        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
    20432032        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
    2044             Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
     2033            Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
    20452034        pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
    20462035        iReg++;
     
    20482037        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
    20492038        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
    2050             Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
     2039            Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
    20512040        pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
    20522041        iReg++;
     
    20542043        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
    20552044        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
    2056             Log7(("NEM/%u: MSR MTRR_FIX16K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
     2045            Log7(("NEM/%u: MSR MTRR_FIX16K_C0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
    20572046        pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
    20582047        iReg++;
     
    20602049        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
    20612050        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
    2062             Log7(("NEM/%u: MSR MTRR_FIX16K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
     2051            Log7(("NEM/%u: MSR MTRR_FIX16K_C8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
    20632052        pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
    20642053        iReg++;
     
    20662055        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
    20672056        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
    2068             Log7(("NEM/%u: MSR MTRR_FIX16K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
     2057            Log7(("NEM/%u: MSR MTRR_FIX16K_D0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
    20692058        pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
    20702059        iReg++;
     
    20722061        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
    20732062        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
    2074             Log7(("NEM/%u: MSR MTRR_FIX16K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
     2063            Log7(("NEM/%u: MSR MTRR_FIX16K_D8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
    20752064        pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
    20762065        iReg++;
     
    20782067        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
    20792068        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
    2080             Log7(("NEM/%u: MSR MTRR_FIX16K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
     2069            Log7(("NEM/%u: MSR MTRR_FIX16K_E0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
    20812070        pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
    20822071        iReg++;
     
    20842073        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
    20852074        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
    2086             Log7(("NEM/%u: MSR MTRR_FIX16K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
     2075            Log7(("NEM/%u: MSR MTRR_FIX16K_E8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
    20872076        pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
    20882077        iReg++;
     
    20902079        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
    20912080        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
    2092             Log7(("NEM/%u: MSR MTRR_FIX16K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
     2081            Log7(("NEM/%u: MSR MTRR_FIX16K_F0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
    20932082        pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
    20942083        iReg++;
     
    20962085        Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
    20972086        if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
    2098             Log7(("NEM/%u: MSR MTRR_FIX16K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
     2087            Log7(("NEM/%u: MSR MTRR_FIX16K_F8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
    20992088        pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
    21002089        iReg++;
     
    21022091        Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
    21032092        if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
    2104             Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
     2093            Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
    21052094        pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
    21062095        iReg++;
     
    21112100            Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
    21122101            if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
    2113                 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
     2102                Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
    21142103            pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
    21152104            iReg++;
     
    21212110            Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
    21222111            if (paValues[iReg].Reg64 != pCtx->hwvirt.vmx.Msrs.u64FeatCtrl)
    2123                 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, pCtx->hwvirt.vmx.Msrs.u64FeatCtrl, paValues[iReg].Reg64));
     2112                Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, pCtx->hwvirt.vmx.Msrs.u64FeatCtrl, paValues[iReg].Reg64));
    21242113            iReg++;
    21252114        }
     
    21352124        if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
    21362125        {
    2137             pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
     2126            pGVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
    21382127            if (paValues[iReg].InterruptState.InterruptShadow)
    2139                 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
     2128                EMSetInhibitInterruptsPC(pGVCpu, paValues[iReg + 1].Reg64);
    21402129            else
    2141                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     2130                VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    21422131        }
    21432132
     
    21452134        {
    21462135            if (paValues[iReg].InterruptState.NmiMasked)
    2147                 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     2136                VMCPU_FF_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS);
    21482137            else
    2149                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     2138                VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_BLOCK_NMIS);
    21502139        }
    21512140
     
    21852174    if (fMaybeChangedMode)
    21862175    {
    2187         rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
     2176        rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
    21882177        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
    21892178    }
     
    21942183        {
    21952184            LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
    2196             rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
     2185            rc = PGMUpdateCR3(pGVCpu, pCtx->cr3);
    21972186            AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
    21982187        }
     
    22142203 * @returns VBox status code
    22152204 * @param   pGVM        The ring-0 VM handle.
    2216  * @param   pVM         The cross context VM handle.
    22172205 * @param   idCpu       The calling EMT.  Necessary for getting the
    22182206 *                      hypercall page and arguments.
     
    22202208 *                      CPUMCTX_EXTERN_ALL for everything.
    22212209 */
    2222 VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t fWhat)
     2210VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, VMCPUID idCpu, uint64_t fWhat)
    22232211{
    22242212#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
     
    22262214     * Validate the call.
    22272215     */
    2228     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     2216    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    22292217    if (RT_SUCCESS(rc))
    22302218    {
    2231         PVMCPUCC  pVCpu  = VMCC_GET_CPU(pVM, idCpu);
    22322219        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
    22332220        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
     
    22362223         * Call worker.
    22372224         */
    2238         rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
     2225        rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
    22392226    }
    22402227    return rc;
    22412228#else
    2242     RT_NOREF(pGVM, pVM, idCpu, fWhat);
     2229    RT_NOREF(pGVM, idCpu, fWhat);
    22432230    return VERR_NOT_IMPLEMENTED;
    22442231#endif
     
    23002287 * @returns VBox status code
    23012288 * @param   pGVM        The ring-0 VM handle.
    2302  * @param   pVM         The cross context VM handle.
    23032289 * @param   idCpu       The calling EMT.  Necessary for getting the
    23042290 *                      hypercall page and arguments.
    23052291 */
    2306 VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     2292VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, VMCPUID idCpu)
    23072293{
    23082294#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
     
    23102296     * Validate the call.
    23112297     */
    2312     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     2298    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    23132299    if (RT_SUCCESS(rc))
    23142300    {
    2315         PVMCPUCC  pVCpu  = VMCC_GET_CPU(pVM, idCpu);
    23162301        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
    23172302        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
     
    23202305         * Call worker.
    23212306         */
    2322         pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
    2323         pVCpu->nem.s.Hypercall.QueryCpuTick.uAux   = 0;
    2324         rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
    2325                                   &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
     2307        pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
     2308        pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux   = 0;
     2309        rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
     2310                                  &pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
    23262311    }
    23272312    return rc;
    23282313#else
    2329     RT_NOREF(pGVM, pVM, idCpu);
     2314    RT_NOREF(pGVM, idCpu);
    23302315    return VERR_NOT_IMPLEMENTED;
    23312316#endif
     
    24092394 * @returns VBox status code
    24102395 * @param   pGVM            The ring-0 VM handle.
    2411  * @param   pVM             The cross context VM handle.
    24122396 * @param   idCpu           The calling EMT.  Necessary for getting the
    24132397 *                          hypercall page and arguments.
    24142398 * @param   uPausedTscValue The TSC value at the time of pausing.
    24152399 */
    2416 VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
     2400VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, VMCPUID idCpu, uint64_t uPausedTscValue)
    24172401{
    24182402#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
     
    24202404     * Validate the call.
    24212405     */
    2422     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     2406    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    24232407    if (RT_SUCCESS(rc))
    24242408    {
    2425         PVMCPUCC  pVCpu  = VMCC_GET_CPU(pVM, idCpu);
    24262409        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
    24272410        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
     
    24302413         * Call worker.
    24312414         */
    2432         pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
    2433         pVCpu->nem.s.Hypercall.QueryCpuTick.uAux   = 0;
     2415        pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
     2416        pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux   = 0;
    24342417        rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
    24352418    }
    24362419    return rc;
    24372420#else
    2438     RT_NOREF(pGVM, pVM, idCpu, uPausedTscValue);
     2421    RT_NOREF(pGVM, idCpu, uPausedTscValue);
    24392422    return VERR_NOT_IMPLEMENTED;
    24402423#endif
     
    24462429#ifdef NEM_WIN_WITH_RING0_RUNLOOP
    24472430    if (pGVM->nemr0.s.fMayUseRing0Runloop)
    2448         return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
     2431        return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu]);
    24492432    return VERR_NEM_RING3_ONLY;
    24502433#else
     
    24602443 * @returns VBox status code.
    24612444 * @param   pGVM        The ring-0 VM handle.
    2462  * @param   pVM         The cross context VM handle.
    24632445 * @param   idCpu       The calling EMT, or NIL.  Necessary for getting the hypercall
    24642446 *                      page and arguments.
    24652447 */
    2466 VMMR0_INT_DECL(int)  NEMR0UpdateStatistics(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     2448VMMR0_INT_DECL(int)  NEMR0UpdateStatistics(PGVM pGVM, VMCPUID idCpu)
    24672449{
    24682450    /*
     
    24712453    int rc;
    24722454    if (idCpu == NIL_VMCPUID)
    2473         rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
     2455        rc = GVMMR0ValidateGVM(pGVM);
    24742456    else
    2475         rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     2457        rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    24762458    if (RT_SUCCESS(rc))
    24772459    {
     
    25062488                if (uResult == HV_STATUS_SUCCESS)
    25072489                {
    2508                     pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
    2509                     pVM->nem.s.R0Stats.cPagesInUse     = pOutput->PagesInUse;
     2490                    pGVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
     2491                    pGVM->nem.s.R0Stats.cPagesInUse     = pOutput->PagesInUse;
    25102492                    rc = VINF_SUCCESS;
    25112493                }
     
    25332515 *
    25342516 * @param   pGVM        The ring-0 VM handle.
    2535  * @param   pVM         The cross context VM handle.
    25362517 * @param   idCpu       The calling EMT.
    25372518 * @param   u64Arg      What to query.  0 == registers.
    25382519 */
    2539 VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t u64Arg)
     2520VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, VMCPUID idCpu, uint64_t u64Arg)
    25402521{
    25412522    /*
    25422523     * Resolve CPU structures.
    25432524     */
    2544     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     2525    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    25452526    if (RT_SUCCESS(rc))
    25462527    {
     
    25482529
    25492530        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
    2550         PVMCPUCC  pVCpu  = VMCC_GET_CPU(pVM, idCpu);
    25512531        if (u64Arg == 0)
    25522532        {
     
    25642544            pInput->VpIndex     = pGVCpu->idCpu;
    25652545            pInput->fFlags      = 0;
    2566             pInput->Names[0]    = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
     2546            pInput->Names[0]    = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
    25672547
    25682548            uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
    25692549                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage,
    25702550                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
    2571             pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
    2572             pVCpu->nem.s.Hypercall.Experiment.uStatus  = uResult;
    2573             pVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
    2574             pVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
     2551            pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
     2552            pGVCpu->nem.s.Hypercall.Experiment.uStatus  = uResult;
     2553            pGVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
     2554            pGVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
    25752555            rc = VINF_SUCCESS;
    25762556        }
     
    25882568
    25892569            pInput->PartitionId  = pGVM->nemr0.s.idHvPartition;
    2590             pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pVCpu->nem.s.Hypercall.Experiment.uItem;
     2570            pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pGVCpu->nem.s.Hypercall.Experiment.uItem;
    25912571            pInput->uPadding     = 0;
    25922572
     
    25942574                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage,
    25952575                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
    2596             pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
    2597             pVCpu->nem.s.Hypercall.Experiment.uStatus  = uResult;
    2598             pVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
    2599             pVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
     2576            pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
     2577            pGVCpu->nem.s.Hypercall.Experiment.uStatus  = uResult;
     2578            pGVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
     2579            pGVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
    26002580            rc = VINF_SUCCESS;
    26012581        }
     
    26122592            pInput->VpIndex     = pGVCpu->idCpu;
    26132593            pInput->RsvdZ      = 0;
    2614             pInput->Elements[0].Name = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
    2615             pInput->Elements[0].Value.Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
    2616             pInput->Elements[0].Value.Reg128.Low64  = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
     2594            pInput->Elements[0].Name = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
     2595            pInput->Elements[0].Value.Reg128.High64 = pGVCpu->nem.s.Hypercall.Experiment.uHiValue;
     2596            pInput->Elements[0].Value.Reg128.Low64  = pGVCpu->nem.s.Hypercall.Experiment.uLoValue;
    26172597
    26182598            uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
    26192599                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
    2620             pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
    2621             pVCpu->nem.s.Hypercall.Experiment.uStatus  = uResult;
     2600            pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
     2601            pGVCpu->nem.s.Hypercall.Experiment.uStatus  = uResult;
    26222602            rc = VINF_SUCCESS;
    26232603        }
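
The NEM ring-0 entry points above (NEMR0ImportState, NEMR0QueryCpuTick, NEMR0ResumeCpuTickOnAll, NEMR0UpdateStatistics, NEMR0DoExperiment) all end up with the same shape: validate the ring-0 handle plus calling EMT, then reach the per-CPU data through pGVM->aCpus[idCpu] rather than a separately passed PVMCC. The stand-alone sketch below shows that shape with invented types, invented status values and a stubbed worker; it is illustrative only and not VirtualBox code.

    #include <stdint.h>
    #include <stdio.h>

    #define VINF_SUCCESS            0
    #define VERR_INVALID_CPU_ID     (-1)    /* stand-in status codes, not IPRT values */
    #define VERR_NOT_OWNER          (-2)

    typedef struct GVMCPU { uint32_t idCpu; uint64_t uNativeThread; uint64_t cTicks; } GVMCPU;
    typedef struct GVM    { uint32_t cCpus; GVMCPU aCpus[4]; } GVM;

    static uint64_t nativeThreadSelf(void) { return 1; }           /* stub */
    static int workerQueryCpuTick(GVM *pGVM, GVMCPU *pGVCpu)        /* stub for the hypercall worker */
    { (void)pGVM; pGVCpu->cTicks = 42; return VINF_SUCCESS; }

    /* One handle in, per-CPU data reached through it -- the post-change shape. */
    static int QueryCpuTickR0(GVM *pGVM, uint32_t idCpu)
    {
        if (idCpu >= pGVM->cCpus)
            return VERR_INVALID_CPU_ID;
        GVMCPU *pGVCpu = &pGVM->aCpus[idCpu];
        if (pGVCpu->uNativeThread != nativeThreadSelf())
            return VERR_NOT_OWNER;                      /* caller must be the EMT */
        return workerQueryCpuTick(pGVM, pGVCpu);
    }

    int main(void)
    {
        GVM Vm = { 1, { { 0, 1, 0 } } };
        int rc = QueryCpuTickR0(&Vm, 0);
        printf("rc=%d cTicks=%llu\n", rc, (unsigned long long)Vm.aCpus[0].cTicks);
        return rc == VINF_SUCCESS ? 0 : 1;
    }
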
  • trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp

    r80334 r80346  
    6060*   Internal Functions                                                                                                           *
    6161*********************************************************************************************************************************/
    62 static bool pdmR0IsaSetIrq(PVMCC pVM, int iIrq, int iLevel, uint32_t uTagSrc);
     62static bool pdmR0IsaSetIrq(PGVM pGVM, int iIrq, int iLevel, uint32_t uTagSrc);
    6363
    6464
     
    132132    LogFlow(("pdmR0DevHlp_PCISetIrq: caller=%p/%d: pPciDev=%p:{%#x} iIrq=%d iLevel=%d\n",
    133133             pDevIns, pDevIns->iInstance, pPciDev, pPciDev->uDevFn, iIrq, iLevel));
    134     PVMCC        pVM     = pDevIns->Internal.s.pVMR0;
     134    PGVM         pGVM    = pDevIns->Internal.s.pVMR0;
    135135    PPDMPCIBUS   pPciBus = pPciDev->Int.s.pPdmBusR0;
    136136
    137     pdmLock(pVM);
     137    pdmLock(pGVM);
    138138    uint32_t uTagSrc;
    139139    if (iLevel & PDM_IRQ_LEVEL_HIGH)
    140140    {
    141         pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
     141        pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pGVM, pDevIns->idTracing);
    142142        if (iLevel == PDM_IRQ_LEVEL_HIGH)
    143             VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
     143            VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    144144        else
    145             VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
     145            VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    146146    }
    147147    else
     
    153153        pPciBus->pfnSetIrqR0(pPciBus->pDevInsR0, pPciDev, iIrq, iLevel, uTagSrc);
    154154
    155         pdmUnlock(pVM);
     155        pdmUnlock(pGVM);
    156156
    157157        if (iLevel == PDM_IRQ_LEVEL_LOW)
    158             VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
     158            VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    159159    }
    160160    else
    161161    {
    162         pdmUnlock(pVM);
     162        pdmUnlock(pGVM);
    163163
    164164        /* queue for ring-3 execution. */
    165         PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM->pdm.s.pDevHlpQueueR0);
     165        PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pGVM->pdm.s.pDevHlpQueueR0);
    166166        AssertReturnVoid(pTask);
    167167
     
    171171        pTask->u.PciSetIRQ.iLevel = iLevel;
    172172        pTask->u.PciSetIRQ.uTagSrc = uTagSrc;
    173         pTask->u.PciSetIRQ.pPciDevR3 = MMHyperR0ToR3(pVM, pPciDev);
    174 
    175         PDMQueueInsertEx(pVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
     173        pTask->u.PciSetIRQ.pPciDevR3 = MMHyperR0ToR3(pGVM, pPciDev);
     174
     175        PDMQueueInsertEx(pGVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
    176176    }
    177177
     
    185185    PDMDEV_ASSERT_DEVINS(pDevIns);
    186186    LogFlow(("pdmR0DevHlp_ISASetIrq: caller=%p/%d: iIrq=%d iLevel=%d\n", pDevIns, pDevIns->iInstance, iIrq, iLevel));
    187     PVMCC pVM = pDevIns->Internal.s.pVMR0;
    188 
    189     pdmLock(pVM);
     187    PGVM pGVM = pDevIns->Internal.s.pVMR0;
     188
     189    pdmLock(pGVM);
    190190    uint32_t uTagSrc;
    191191    if (iLevel & PDM_IRQ_LEVEL_HIGH)
    192192    {
    193         pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
     193        pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pGVM, pDevIns->idTracing);
    194194        if (iLevel == PDM_IRQ_LEVEL_HIGH)
    195             VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
     195            VBOXVMM_PDM_IRQ_HIGH(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    196196        else
    197             VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
     197            VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    198198    }
    199199    else
    200200        uTagSrc = pDevIns->Internal.s.uLastIrqTag;
    201201
    202     bool fRc = pdmR0IsaSetIrq(pVM, iIrq, iLevel, uTagSrc);
     202    bool fRc = pdmR0IsaSetIrq(pGVM, iIrq, iLevel, uTagSrc);
    203203
    204204    if (iLevel == PDM_IRQ_LEVEL_LOW && fRc)
    205         VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    206     pdmUnlock(pVM);
     205        VBOXVMM_PDM_IRQ_LOW(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
     206    pdmUnlock(pGVM);
    207207    LogFlow(("pdmR0DevHlp_ISASetIrq: caller=%p/%d: returns void; uTagSrc=%#x\n", pDevIns, pDevIns->iInstance, uTagSrc));
    208208}
     
    214214    PDMDEV_ASSERT_DEVINS(pDevIns);
    215215    LogFlow(("pdmR0DevHlp_IoApicSendMsi: caller=%p/%d: GCPhys=%RGp uValue=%#x\n", pDevIns, pDevIns->iInstance, GCPhys, uValue));
    216     PVMCC pVM = pDevIns->Internal.s.pVMR0;
     216    PGVM pGVM = pDevIns->Internal.s.pVMR0;
    217217
    218218    uint32_t uTagSrc;
    219     pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pVM, pDevIns->idTracing);
    220     VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
    221 
    222     if (pVM->pdm.s.IoApic.pDevInsR0)
    223         pVM->pdm.s.IoApic.pfnSendMsiR0(pVM->pdm.s.IoApic.pDevInsR0, GCPhys, uValue, uTagSrc);
     219    pDevIns->Internal.s.uLastIrqTag = uTagSrc = pdmCalcIrqTag(pGVM, pDevIns->idTracing);
     220    VBOXVMM_PDM_IRQ_HILO(VMMGetCpu(pGVM), RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc));
     221
     222    if (pGVM->pdm.s.IoApic.pDevInsR0)
     223        pGVM->pdm.s.IoApic.pfnSendMsiR0(pGVM->pdm.s.IoApic.pDevInsR0, GCPhys, uValue, uTagSrc);
    224224    else
    225225        AssertFatalMsgFailed(("Lazy bastards!"));
     
    518518{
    519519    PDMDEV_ASSERT_DEVINS(pDevIns);
    520     PVMCC pVM = pDevIns->Internal.s.pVMR0;
     520    PGVM pGVM = pDevIns->Internal.s.pVMR0;
    521521    LogFlow(("pdmR0IoApicHlp_ApicBusDeliver: caller=%p/%d: u8Dest=%RX8 u8DestMode=%RX8 u8DeliveryMode=%RX8 uVector=%RX8 u8Polarity=%RX8 u8TriggerMode=%RX8 uTagSrc=%#x\n",
    522522             pDevIns, pDevIns->iInstance, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc));
    523     return APICBusDeliver(pVM, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc);
     523    return APICBusDeliver(pGVM, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc);
    524524}
    525525
     
    567567    PDMDEV_ASSERT_DEVINS(pDevIns);
    568568    Log4(("pdmR0PciHlp_IsaSetIrq: iIrq=%d iLevel=%d uTagSrc=%#x\n", iIrq, iLevel, uTagSrc));
    569     PVMCC pVM = pDevIns->Internal.s.pVMR0;
    570 
    571     pdmLock(pVM);
    572     pdmR0IsaSetIrq(pVM, iIrq, iLevel, uTagSrc);
    573     pdmUnlock(pVM);
     569    PGVM pGVM = pDevIns->Internal.s.pVMR0;
     570
     571    pdmLock(pGVM);
     572    pdmR0IsaSetIrq(pGVM, iIrq, iLevel, uTagSrc);
     573    pdmUnlock(pGVM);
    574574}
    575575
     
    580580    PDMDEV_ASSERT_DEVINS(pDevIns);
    581581    Log4(("pdmR0PciHlp_IoApicSetIrq: iIrq=%d iLevel=%d uTagSrc=%#x\n", iIrq, iLevel, uTagSrc));
    582     PVMCC pVM = pDevIns->Internal.s.pVMR0;
    583 
    584     if (pVM->pdm.s.IoApic.pDevInsR0)
    585         pVM->pdm.s.IoApic.pfnSetIrqR0(pVM->pdm.s.IoApic.pDevInsR0, iIrq, iLevel, uTagSrc);
    586     else if (pVM->pdm.s.IoApic.pDevInsR3)
     582    PGVM pGVM = pDevIns->Internal.s.pVMR0;
     583
     584    if (pGVM->pdm.s.IoApic.pDevInsR0)
     585        pGVM->pdm.s.IoApic.pfnSetIrqR0(pGVM->pdm.s.IoApic.pDevInsR0, iIrq, iLevel, uTagSrc);
     586    else if (pGVM->pdm.s.IoApic.pDevInsR3)
    587587    {
    588588        /* queue for ring-3 execution. */
    589         PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM->pdm.s.pDevHlpQueueR0);
     589        PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pGVM->pdm.s.pDevHlpQueueR0);
    590590        if (pTask)
    591591        {
     
    596596            pTask->u.IoApicSetIRQ.uTagSrc = uTagSrc;
    597597
    598             PDMQueueInsertEx(pVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
     598            PDMQueueInsertEx(pGVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
    599599        }
    600600        else
     
    609609    PDMDEV_ASSERT_DEVINS(pDevIns);
    610610    Log4(("pdmR0PciHlp_IoApicSendMsi: GCPhys=%p uValue=%d uTagSrc=%#x\n", GCPhys, uValue, uTagSrc));
    611     PVMCC pVM = pDevIns->Internal.s.pVMR0;
    612     if (pVM->pdm.s.IoApic.pDevInsR0)
    613         pVM->pdm.s.IoApic.pfnSendMsiR0(pVM->pdm.s.IoApic.pDevInsR0, GCPhys, uValue, uTagSrc);
     611    PGVM pGVM = pDevIns->Internal.s.pVMR0;
     612    if (pGVM->pdm.s.IoApic.pDevInsR0)
     613        pGVM->pdm.s.IoApic.pfnSendMsiR0(pGVM->pdm.s.IoApic.pDevInsR0, GCPhys, uValue, uTagSrc);
    614614    else
    615615        AssertFatalMsgFailed(("Lazy bastards!"));
     
    784784 *
    785785 * @returns true if delivered, false if postponed.
    786  * @param   pVM         The cross context VM structure.
     786 * @param   pGVM        The global (ring-0) VM structure.
    787787 * @param   iIrq        The irq.
    788788 * @param   iLevel      The new level.
     
    791791 * @remarks The caller holds the PDM lock.
    792792 */
    793 static bool pdmR0IsaSetIrq(PVMCC pVM, int iIrq, int iLevel, uint32_t uTagSrc)
    794 {
    795     if (RT_LIKELY(    (   pVM->pdm.s.IoApic.pDevInsR0
    796                        || !pVM->pdm.s.IoApic.pDevInsR3)
    797                   &&  (   pVM->pdm.s.Pic.pDevInsR0
    798                        || !pVM->pdm.s.Pic.pDevInsR3)))
     793static bool pdmR0IsaSetIrq(PGVM pGVM, int iIrq, int iLevel, uint32_t uTagSrc)
     794{
     795    if (RT_LIKELY(    (   pGVM->pdm.s.IoApic.pDevInsR0
     796                       || !pGVM->pdm.s.IoApic.pDevInsR3)
     797                  &&  (   pGVM->pdm.s.Pic.pDevInsR0
     798                       || !pGVM->pdm.s.Pic.pDevInsR3)))
    799799    {
    800         if (pVM->pdm.s.Pic.pDevInsR0)
    801             pVM->pdm.s.Pic.pfnSetIrqR0(pVM->pdm.s.Pic.pDevInsR0, iIrq, iLevel, uTagSrc);
    802         if (pVM->pdm.s.IoApic.pDevInsR0)
    803             pVM->pdm.s.IoApic.pfnSetIrqR0(pVM->pdm.s.IoApic.pDevInsR0, iIrq, iLevel, uTagSrc);
     800        if (pGVM->pdm.s.Pic.pDevInsR0)
     801            pGVM->pdm.s.Pic.pfnSetIrqR0(pGVM->pdm.s.Pic.pDevInsR0, iIrq, iLevel, uTagSrc);
     802        if (pGVM->pdm.s.IoApic.pDevInsR0)
     803            pGVM->pdm.s.IoApic.pfnSetIrqR0(pGVM->pdm.s.IoApic.pDevInsR0, iIrq, iLevel, uTagSrc);
    804804        return true;
    805805    }
    806806
    807807    /* queue for ring-3 execution. */
    808     PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM->pdm.s.pDevHlpQueueR0);
     808    PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pGVM->pdm.s.pDevHlpQueueR0);
    809809    AssertReturn(pTask, false);
    810810
     
    815815    pTask->u.IsaSetIRQ.uTagSrc = uTagSrc;
    816816
    817     PDMQueueInsertEx(pVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
     817    PDMQueueInsertEx(pGVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
    818818    return false;
    819819}
     
    825825 * @returns See PFNPDMDEVREQHANDLERR0.
    826826 * @param   pGVM    The global (ring-0) VM structure. (For validation.)
    827  * @param   pVM     The cross context VM structure. (For validation.)
    828827 * @param   pReq    Pointer to the request buffer.
    829828 */
    830 VMMR0_INT_DECL(int) PDMR0DeviceCallReqHandler(PGVM pGVM, PVMCC pVM, PPDMDEVICECALLREQHANDLERREQ pReq)
     829VMMR0_INT_DECL(int) PDMR0DeviceCallReqHandler(PGVM pGVM, PPDMDEVICECALLREQHANDLERREQ pReq)
    831830{
    832831    /*
    833832     * Validate input and make the call.
    834833     */
    835     int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
     834    int rc = GVMMR0ValidateGVM(pGVM);
    836835    if (RT_SUCCESS(rc))
    837836    {
     
    841840        PPDMDEVINS pDevIns = pReq->pDevInsR0;
    842841        AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
    843         AssertReturn(pDevIns->Internal.s.pVMR0 == pVM, VERR_INVALID_PARAMETER);
     842        AssertReturn(pDevIns->Internal.s.pVMR0 == pGVM, VERR_INVALID_PARAMETER);
    844843
    845844        PFNPDMDEVREQHANDLERR0 pfnReqHandlerR0 = pReq->pfnReqHandlerR0;
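
pdmR0IsaSetIrq and the PCI/ISA helpers above keep their deliver-or-queue behaviour; only the handle type changes from PVMCC to PGVM. As a reminder of that behaviour, here is a compact stand-alone sketch with invented names (not VirtualBox code): deliver straight away when a ring-0 device instance exists, otherwise queue the request so ring-3 can replay it.

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Pic     { void (*pfnSetIrq)(struct Pic *pPic, int iIrq, int iLevel); } Pic;
    typedef struct IrqTask { int iIrq; int iLevel; } IrqTask;
    typedef struct VmR0
    {
        Pic     *pPicR0;        /* ring-0 instance, may be NULL           */
        bool     fPicInR3;      /* a ring-3 instance exists               */
        IrqTask  aQueue[16];    /* stand-in for the ring-3 helper queue   */
        unsigned cQueued;
    } VmR0;

    /* Returns true if delivered, false if postponed to ring-3 (caller holds the lock). */
    static bool isaSetIrqR0(VmR0 *pVm, int iIrq, int iLevel)
    {
        if (pVm->pPicR0 || !pVm->fPicInR3)
        {
            if (pVm->pPicR0)
                pVm->pPicR0->pfnSetIrq(pVm->pPicR0, iIrq, iLevel);
            return true;                            /* delivered (or nothing to deliver to) */
        }
        /* Only a ring-3 instance exists: queue the request for ring-3 execution. */
        if (pVm->cQueued < sizeof(pVm->aQueue) / sizeof(pVm->aQueue[0]))
        {
            pVm->aQueue[pVm->cQueued].iIrq   = iIrq;
            pVm->aQueue[pVm->cQueued].iLevel = iLevel;
            pVm->cQueued++;
        }
        return false;                               /* postponed */
    }
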
  • trunk/src/VBox/VMM/VMMR0/PDMR0Driver.cpp

    r80333 r80346  
    3737 * @returns See PFNPDMDRVREQHANDLERR0.
    3838 * @param   pGVM    The global (ring-0) VM structure. (For validation.)
    39  * @param   pVM     The cross context VM structure. (For validation.)
    4039 * @param   pReq    Pointer to the request buffer.
    4140 */
    42 VMMR0_INT_DECL(int) PDMR0DriverCallReqHandler(PGVM pGVM, PVMCC pVM, PPDMDRIVERCALLREQHANDLERREQ pReq)
     41VMMR0_INT_DECL(int) PDMR0DriverCallReqHandler(PGVM pGVM, PPDMDRIVERCALLREQHANDLERREQ pReq)
    4342{
    4443    /*
    4544     * Validate input and make the call.
    4645     */
    47     int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
     46    int rc = GVMMR0ValidateGVM(pGVM);
    4847    if (RT_SUCCESS(rc))
    4948    {
     
    5352        PPDMDRVINS pDrvIns = pReq->pDrvInsR0;
    5453        AssertPtrReturn(pDrvIns, VERR_INVALID_POINTER);
    55         AssertReturn(pDrvIns->Internal.s.pVMR0 == pVM, VERR_INVALID_PARAMETER);
     54        AssertReturn(pDrvIns->Internal.s.pVMR0 == pGVM, VERR_INVALID_PARAMETER);
    5655
    5756        PFNPDMDRVREQHANDLERR0 pfnReqHandlerR0 = pDrvIns->Internal.s.pfnReqHandlerR0;
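
Both request-handler wrappers (PDMR0DeviceCallReqHandler above and PDMR0DriverCallReqHandler here) switch from cross-checking two VM pointers to validating only the ring-0 handle and comparing the instance back-pointer against it. A minimal sketch of that validation shape, with invented names and status values (not VirtualBox code):

    #include <stddef.h>

    #define VINF_SUCCESS            0
    #define VERR_BAD_HANDLE         (-1)    /* stand-in status codes */
    #define VERR_INVALID_PARAMETER  (-2)

    typedef struct GVmHandle { unsigned uMagic; } GVmHandle;
    #define GVM_SKETCH_MAGIC 0x47564d00u    /* invented magic value */

    typedef struct InstanceR0 { GVmHandle *pVMR0; } InstanceR0;

    static int validateGVM(GVmHandle *pGVM)
    {
        return pGVM && pGVM->uMagic == GVM_SKETCH_MAGIC ? VINF_SUCCESS : VERR_BAD_HANDLE;
    }

    /* Post-change shape: one handle validated, instance back-pointer checked against it. */
    static int callReqHandlerR0(GVmHandle *pGVM, InstanceR0 *pInstance)
    {
        int rc = validateGVM(pGVM);
        if (rc == VINF_SUCCESS && pInstance->pVMR0 != pGVM)
            rc = VERR_INVALID_PARAMETER;    /* instance belongs to another VM */
        return rc;
    }
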
  • trunk/src/VBox/VMM/VMMR0/PGMR0.cpp

    r80334 r80346  
    6363 *
    6464 * @param   pGVM        The global (ring-0) VM structure.
    65  * @param   pVM         The cross context VM structure.
    6665 * @param   idCpu       The ID of the calling EMT.
    6766 *
     
    7170 *          must clear the new pages.
    7271 */
    73 VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     72VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu)
    7473{
    7574    /*
     
    7877    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
    7978    AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
    80     PGM_LOCK_ASSERT_OWNER_EX(pVM, &pGVM->aCpus[idCpu]);
     79    PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
    8180
    8281    /*
    8382     * Check for error injection.
    8483     */
    85     if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
     84    if (RT_UNLIKELY(pGVM->pgm.s.fErrInjHandyPages))
    8685        return VERR_NO_MEMORY;
    8786
     
    8988     * Try allocate a full set of handy pages.
    9089     */
    91     uint32_t iFirst = pVM->pgm.s.cHandyPages;
    92     AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
    93     uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
     90    uint32_t iFirst = pGVM->pgm.s.cHandyPages;
     91    AssertReturn(iFirst <= RT_ELEMENTS(pGVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
     92    uint32_t cPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages) - iFirst;
    9493    if (!cPages)
    9594        return VINF_SUCCESS;
    96     int rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
     95    int rc = GMMR0AllocateHandyPages(pGVM, idCpu, cPages, cPages, &pGVM->pgm.s.aHandyPages[iFirst]);
    9796    if (RT_SUCCESS(rc))
    9897    {
    9998#ifdef VBOX_STRICT
    100         for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
    101         {
    102             Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
    103             Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
    104             Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
    105             Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
    106             Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
     99        for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgm.s.aHandyPages); i++)
     100        {
     101            Assert(pGVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
     102            Assert(pGVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
     103            Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
     104            Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
     105            Assert(!(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
    107106        }
    108107#endif
    109108
    110         pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
     109        pGVM->pgm.s.cHandyPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages);
    111110    }
    112111    else if (rc != VERR_GMM_SEED_ME)
     
    120119            /* We're ASSUMING that GMM has updated all the entires before failing us. */
    121120            uint32_t i;
    122             for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
     121            for (i = iFirst; i < RT_ELEMENTS(pGVM->pgm.s.aHandyPages); i++)
    123122            {
    124                 Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
    125                 Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
    126                 Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
     123                Assert(pGVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
     124                Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
     125                Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
    127126            }
    128127#endif
     
    136135                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
    137136                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
    138                 rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
     137                rc = GMMR0AllocateHandyPages(pGVM, idCpu, 0, cPages, &pGVM->pgm.s.aHandyPages[iFirst]);
    139138            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
    140139                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
     
    146145                while (i-- > 0)
    147146                {
    148                     Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
    149                     Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
    150                     Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
    151                     Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
    152                     Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
     147                    Assert(pGVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
     148                    Assert(pGVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
     149                    Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
     150                    Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
     151                    Assert(!(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
    153152                }
    154153
    155                 for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
     154                for (i = cPages + iFirst; i < RT_ELEMENTS(pGVM->pgm.s.aHandyPages); i++)
    156155                {
    157                     Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
    158                     Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
    159                     Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
     156                    Assert(pGVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
     157                    Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
     158                    Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
    160159                }
    161160#endif
    162161
    163                 pVM->pgm.s.cHandyPages = iFirst + cPages;
     162                pGVM->pgm.s.cHandyPages = iFirst + cPages;
    164163            }
    165164        }
     
    168167        {
    169168            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
    170             VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
     169            VM_FF_SET(pGVM, VM_FF_PGM_NO_MEMORY);
    171170        }
    172171    }
     
    187186 *
    188187 * @param   pGVM        The global (ring-0) VM structure.
    189  * @param   pVM         The cross context VM structure.
    190188 * @param   idCpu       The ID of the calling EMT.
    191189 *
     
    194192 * @remarks Must be called from within the PGM critical section.
    195193 */
    196 VMMR0_INT_DECL(int) PGMR0PhysFlushHandyPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     194VMMR0_INT_DECL(int) PGMR0PhysFlushHandyPages(PGVM pGVM, VMCPUID idCpu)
    197195{
    198196    /*
     
    201199    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
    202200    AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
    203     PGM_LOCK_ASSERT_OWNER_EX(pVM, &pGVM->aCpus[idCpu]);
     201    PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
    204202
    205203    /*
    206204     * Try allocate a full set of handy pages.
    207205     */
    208     uint32_t iFirst = pVM->pgm.s.cHandyPages;
    209     AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
    210     uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
     206    uint32_t iFirst = pGVM->pgm.s.cHandyPages;
     207    AssertReturn(iFirst <= RT_ELEMENTS(pGVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
     208    uint32_t cPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages) - iFirst;
    211209    if (!cPages)
    212210        return VINF_SUCCESS;
    213     int rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, cPages, 0, &pVM->pgm.s.aHandyPages[iFirst]);
     211    int rc = GMMR0AllocateHandyPages(pGVM, idCpu, cPages, 0, &pGVM->pgm.s.aHandyPages[iFirst]);
    214212
    215213    LogFlow(("PGMR0PhysFlushHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
     
    226224 *
    227225 * @param   pGVM        The global (ring-0) VM structure.
    228  * @param   pVM         The cross context VM structure.
    229226 * @param   idCpu       The ID of the calling EMT.
    230227 *
     
    234231 *          must clear the new pages.
    235232 */
    236 VMMR0_INT_DECL(int) PGMR0PhysAllocateLargeHandyPage(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     233VMMR0_INT_DECL(int) PGMR0PhysAllocateLargeHandyPage(PGVM pGVM, VMCPUID idCpu)
    237234{
    238235    /*
     
    241238    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
    242239    AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
    243     PGM_LOCK_ASSERT_OWNER_EX(pVM, &pGVM->aCpus[idCpu]);
    244     Assert(!pVM->pgm.s.cLargeHandyPages);
     240    PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
     241    Assert(!pGVM->pgm.s.cLargeHandyPages);
    245242
    246243    /*
    247244     * Do the job.
    248245     */
    249     int rc = GMMR0AllocateLargePage(pGVM, pVM, idCpu, _2M,
    250                                     &pVM->pgm.s.aLargeHandyPage[0].idPage,
    251                                     &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
     246    int rc = GMMR0AllocateLargePage(pGVM, idCpu, _2M,
     247                                    &pGVM->pgm.s.aLargeHandyPage[0].idPage,
     248                                    &pGVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    252249    if (RT_SUCCESS(rc))
    253         pVM->pgm.s.cLargeHandyPages = 1;
     250        pGVM->pgm.s.cLargeHandyPages = 1;
    254251
    255252    return rc;
     
    383380 *
    384381 * @param   pGVM                The global (ring-0) VM structure.
    385  * @param   pVM                 The cross context VM structure.
    386  */
    387 VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM, PVMCC pVM)
    388 {
    389     int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
     382 */
     383VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM)
     384{
     385    int rc = GVMMR0ValidateGVM(pGVM);
    390386    if (RT_FAILURE(rc))
    391387        return rc;
    392388
    393389#ifdef VBOX_WITH_PCI_PASSTHROUGH
    394     if (pVM->pgm.s.fPciPassthrough)
     390    if (pGVM->pgm.s.fPciPassthrough)
    395391    {
    396392        /*
     
    398394         * IOMMU about each of them.
    399395         */
    400         pgmLock(pVM);
     396        pgmLock(pGVM);
    401397        rc = GPciRawR0GuestPageBeginAssignments(pGVM);
    402398        if (RT_SUCCESS(rc))
    403399        {
    404             for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
     400            for (PPGMRAMRANGE pRam = pGVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
    405401            {
    406402                PPGMPAGE    pPage  = &pRam->aPages[0];
     
    427423                rc = rc2;
    428424        }
    429         pgmUnlock(pVM);
     425        pgmUnlock(pGVM);
    430426    }
    431427    else
     
    440436 *
    441437 * @returns VBox status code (appropriate for trap handling and GC return).
    442  * @param   pVM                 The cross context VM structure.
    443  * @param   pVCpu               The cross context virtual CPU structure.
     438 * @param   pGVM                The global (ring-0) VM structure.
     439 * @param   pGVCpu              The global (ring-0) CPU structure of the calling
     440 *                              EMT.
    444441 * @param   enmShwPagingMode    Paging mode for the nested page tables.
    445442 * @param   uErr                The trap error code.
     
    447444 * @param   GCPhysFault         The fault address.
    448445 */
    449 VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
     446VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
    450447                                              PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
    451448{
     
    453450
    454451    LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
    455     STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    456     STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
     452    STAM_PROFILE_START(&pGVCpu->pgm.s.StatRZTrap0e, a);
     453    STAM_STATS({ pGVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
    457454
    458455    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
     
    473470        {
    474471            if (uErr & X86_TRAP_PF_RW)
    475                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
     472                STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
    476473            else
    477                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
     474                STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
    478475        }
    479476        else if (uErr & X86_TRAP_PF_RW)
    480             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
     477            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
    481478        else if (uErr & X86_TRAP_PF_RSVD)
    482             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
     479            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
    483480        else if (uErr & X86_TRAP_PF_ID)
    484             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
     481            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
    485482        else
    486             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
     483            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    487484    }
    488485    else
     
    491488        {
    492489            if (uErr & X86_TRAP_PF_RW)
    493                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
     490                STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
    494491            else
    495                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
     492                STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
    496493        }
    497494        else if (uErr & X86_TRAP_PF_RW)
    498             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
     495            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
    499496        else if (uErr & X86_TRAP_PF_ID)
    500             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
     497            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
    501498        else if (uErr & X86_TRAP_PF_RSVD)
    502             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
     499            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    503500    }
    504501#endif
     
    515512    {
    516513        case PGMMODE_32_BIT:
    517             rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
     514            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
    518515            break;
    519516        case PGMMODE_PAE:
    520517        case PGMMODE_PAE_NX:
    521             rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
     518            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
    522519            break;
    523520        case PGMMODE_AMD64:
    524521        case PGMMODE_AMD64_NX:
    525             rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
     522            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
    526523            break;
    527524        case PGMMODE_EPT:
    528             rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
     525            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
    529526            break;
    530527        default:
     
    535532    if (fLockTaken)
    536533    {
    537         PGM_LOCK_ASSERT_OWNER(pVM);
    538         pgmUnlock(pVM);
     534        PGM_LOCK_ASSERT_OWNER(pGVM);
     535        pgmUnlock(pGVM);
    539536    }
    540537
     
    556553    }
    557554
    558     STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
    559                     pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    560     STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
     555    STAM_STATS({ if (!pGVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
     556                    pGVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
     557    STAM_PROFILE_STOP_EX(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pGVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    561558    return rc;
    562559}
     
    568565 *
    569566 * @returns VBox status code (appropriate for trap handling and GC return).
    570  * @param   pVM                 The cross context VM structure.
    571  * @param   pVCpu               The cross context virtual CPU structure.
     567 * @param   pGVM                The global (ring-0) VM structure.
     568 * @param   pGVCpu              The global (ring-0) CPU structure of the calling
     569 *                              EMT.
    572570 * @param   enmShwPagingMode    Paging mode for the nested page tables.
    573571 * @param   pRegFrame           Trap register frame.
     
    576574 *                              (VT-x).
    577575 */
    578 VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmShwPagingMode,
     576VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode,
    579577                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
    580578{
    581579#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
    582     STAM_PROFILE_START(&pVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
     580    STAM_PROFILE_START(&pGVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    583581    VBOXSTRICTRC rc;
    584582
     
    586584     * Try lookup the all access physical handler for the address.
    587585     */
    588     pgmLock(pVM);
    589     PPGMPHYSHANDLER         pHandler     = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    590     PPGMPHYSHANDLERTYPEINT  pHandlerType = RT_LIKELY(pHandler) ? PGMPHYSHANDLER_GET_TYPE(pVM, pHandler) : NULL;
     586    pgmLock(pGVM);
     587    PPGMPHYSHANDLER         pHandler     = pgmHandlerPhysicalLookup(pGVM, GCPhysFault);
     588    PPGMPHYSHANDLERTYPEINT  pHandlerType = RT_LIKELY(pHandler) ? PGMPHYSHANDLER_GET_TYPE(pGVM, pHandler) : NULL;
    591589    if (RT_LIKELY(pHandler && pHandlerType->enmKind != PGMPHYSHANDLERKIND_WRITE))
    592590    {
     
    599597        if (   (   pHandler->cAliasedPages
    600598                || pHandler->cTmpOffPages)
    601             && (   (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
     599            && (   (pPage = pgmPhysGetPage(pGVM, GCPhysFault)) == NULL
    602600                || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
    603601           )
    604602        {
    605603            Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
    606             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
    607             rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
    608             pgmUnlock(pVM);
     604            STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
     605            rc = pgmShwSyncNestedPageLocked(pGVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
     606            pgmUnlock(pGVM);
    609607        }
    610608        else
     
    614612                void *pvUser = pHandler->CTX_SUFF(pvUser);
    615613                STAM_PROFILE_START(&pHandler->Stat, h);
    616                 pgmUnlock(pVM);
     614                pgmUnlock(pGVM);
    617615
    618616                Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pHandlerType->CTX_SUFF(pfnPfHandler), uErr, GCPhysFault, pvUser));
    619                 rc = pHandlerType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame,
     617                rc = pHandlerType->CTX_SUFF(pfnPfHandler)(pGVM, pGVCpu, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame,
    620618                                                          GCPhysFault, GCPhysFault, pvUser);
    621619
    622620#ifdef VBOX_WITH_STATISTICS
    623                 pgmLock(pVM);
    624                 pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
     621                pgmLock(pGVM);
     622                pHandler = pgmHandlerPhysicalLookup(pGVM, GCPhysFault);
    625623                if (pHandler)
    626624                    STAM_PROFILE_STOP(&pHandler->Stat, h);
    627                 pgmUnlock(pVM);
     625                pgmUnlock(pGVM);
    628626#endif
    629627            }
    630628            else
    631629            {
    632                 pgmUnlock(pVM);
     630                pgmUnlock(pGVM);
    633631                Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
    634632                rc = VINF_EM_RAW_EMULATE_INSTR;
     
    645643         */
    646644        Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
    647         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
    648         rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
    649         pgmUnlock(pVM);
    650     }
    651 
    652     STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
     645        STAM_COUNTER_INC(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
     646        rc = pgmShwSyncNestedPageLocked(pGVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
     647        pgmUnlock(pGVM);
     648    }
     649
     650    STAM_PROFILE_STOP(&pGVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    653651    return rc;
    654652
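
PGMR0PhysAllocateHandyPages above keeps its top-up logic intact while dropping the pVM parameter: fill the free tail of the fixed-size handy-page array, and if the full request is refused, retry with only the batch needed to reach the minimum. The following self-contained sketch shows that pattern with invented names and a trivially succeeding allocator stub; it is illustrative only and simplifies the real error handling.

    #include <stdint.h>

    #define CACHE_SIZE  128
    #define CACHE_MIN    32
    typedef struct PageCache { uint32_t cReady; uint64_t aPages[CACHE_SIZE]; } PageCache;

    /* Stand-in for the GMM allocator: fills aPages[iFirst..iFirst+cPages) or fails. */
    static int allocPages(PageCache *pCache, uint32_t iFirst, uint32_t cPages)
    {
        for (uint32_t i = 0; i < cPages; i++)
            pCache->aPages[iFirst + i] = 0x1000u * (iFirst + i + 1);
        return 0;
    }

    /* Entries [0, cReady) are usable; fill the free tail, falling back to the
       smallest batch that still reaches the minimum if the full request fails. */
    static int topUpHandyPages(PageCache *pCache)
    {
        uint32_t const iFirst = pCache->cReady;
        uint32_t       cPages = CACHE_SIZE - iFirst;
        if (!cPages)
            return 0;                                   /* already full */
        int rc = allocPages(pCache, iFirst, cPages);
        if (rc == 0)
            pCache->cReady = CACHE_SIZE;
        else if (iFirst < CACHE_MIN)
        {
            cPages = CACHE_MIN - iFirst;
            rc = allocPages(pCache, iFirst, cPages);
            if (rc == 0)
                pCache->cReady = iFirst + cPages;
        }
        return rc;
    }
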
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r80334 r80346  
    103103        } \
    104104    } while (0)
    105 # define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
     105# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
    106106    do { \
    107107        if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
     
    110110            if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
    111111            { /* likely */ } \
     112            else if (a_pGVM) \
     113            { \
     114                SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
     115                RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
     116                            "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
     117                a_BadExpr; \
     118            } \
    112119            else \
    113120            { \
    114                 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
    115                 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
    116                             "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
     121                SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
    117122                a_BadExpr; \
    118123            } \
     
    120125    } while (0)
    121126#else
    122 # define VMM_CHECK_SMAP_SETUP()            uint32_t const fKernelFeatures = 0
    123 # define VMM_CHECK_SMAP_CHECK(a_BadExpr)            NOREF(fKernelFeatures)
    124 # define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr)    NOREF(fKernelFeatures)
     127# define VMM_CHECK_SMAP_SETUP()                         uint32_t const fKernelFeatures = 0
     128# define VMM_CHECK_SMAP_CHECK(a_BadExpr)                NOREF(fKernelFeatures)
     129# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr)       NOREF(fKernelFeatures)
    125130#endif
    126131
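
With the reworked VMM_CHECK_SMAP_CHECK2 the caller passes the GVM pointer: when it is non-NULL the macro reports a cleared EFLAGS.AC through SUPR0BadContext using pGVM->pSession and records the message in the GVM's assertion buffer, otherwise it falls back to SUPR0Printf. A hedged usage sketch of the pattern the workers further down follow (vmmR0SomeWorker is a made-up name; everything else appears in this changeset):

    static int vmmR0SomeWorker(PGVM pGVM)
    {
        VMM_CHECK_SMAP_SETUP();                                      /* snapshot fKernelFeatures once */
        VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);                     /* just report, keep going */
        int rc = GVMMR0InitVM(pGVM);                                 /* ... real work ... */
        VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);  /* or demote the violation to an error */
        return rc;
    }
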
     
    361366 *
    362367 * @param   pGVM        The global (ring-0) VM structure.
    363  * @param   pVM         The cross context VM structure.
    364368 * @param   uSvnRev     The SVN revision of the ring-3 part.
    365369 * @param   uBuildType  Build type indicator.
    366370 * @thread  EMT(0)
    367371 */
    368 static int vmmR0InitVM(PGVM pGVM, PVMCC pVM, uint32_t uSvnRev, uint32_t uBuildType)
     372static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
    369373{
    370374    VMM_CHECK_SMAP_SETUP();
     
    387391    }
    388392
    389     int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
     393    int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
    390394    if (RT_FAILURE(rc))
    391395        return rc;
     
    395399     * Register the EMT R0 logger instance for VCPU 0.
    396400     */
    397     PVMCPUCC pVCpu = VMCC_GET_CPU_0(pVM);
     401    PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
    398402
    399403    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
     
    406410        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
    407411
    408         RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
     412        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
    409413        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
    410         RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
     414        RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
    411415        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
    412416
     
    416420        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
    417421
    418         RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
     422        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
    419423        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
    420424        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
    421425        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
    422         RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
     426        RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
    423427        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
    424428
     
    426430        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
    427431
    428         RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
     432        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
    429433        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
    430434        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
    431435# endif
    432         Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
    433         RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
     436        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
     437        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
    434438        pR0Logger->fRegistered = true;
    435439    }
     
    439443     * Check if the host supports high resolution timers or not.
    440444     */
    441     if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
     445    if (   pGVM->vmm.s.fUsePeriodicPreemptionTimers
    442446        && !RTTimerCanDoHighResolution())
    443         pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
     447        pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
    444448
    445449    /*
    446450     * Initialize the per VM data for GVMM and GMM.
    447451     */
    448     VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     452    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    449453    rc = GVMMR0InitVM(pGVM);
    450 //    if (RT_SUCCESS(rc))
    451 //        rc = GMMR0InitPerVMData(pVM);
    452454    if (RT_SUCCESS(rc))
    453455    {
     
    455457         * Init HM, CPUM and PGM (Darwin only).
    456458         */
    457         VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    458         rc = HMR0InitVM(pVM);
     459        VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
     460        rc = HMR0InitVM(pGVM);
    459461        if (RT_SUCCESS(rc))
    460             VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
     462            VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
    461463        if (RT_SUCCESS(rc))
    462464        {
    463             rc = CPUMR0InitVM(pVM);
     465            rc = CPUMR0InitVM(pGVM);
    464466            if (RT_SUCCESS(rc))
    465467            {
    466                 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     468                VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    467469#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    468                 rc = PGMR0DynMapInitVM(pVM);
     470                rc = PGMR0DynMapInitVM(pGVM);
    469471#endif
    470472                if (RT_SUCCESS(rc))
    471473                {
    472                     VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     474                    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    473475                    rc = EMR0InitVM(pGVM);
    474476                    if (RT_SUCCESS(rc))
    475477                    {
    476                         VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     478                        VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    477479#ifdef VBOX_WITH_PCI_PASSTHROUGH
    478                         rc = PciRawR0InitVM(pGVM, pVM);
     480                        rc = PciRawR0InitVM(pGVM);
    479481#endif
    480482                        if (RT_SUCCESS(rc))
    481483                        {
    482                             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    483                             rc = GIMR0InitVM(pVM);
     484                            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
     485                            rc = GIMR0InitVM(pGVM);
    484486                            if (RT_SUCCESS(rc))
    485487                            {
    486                                 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
     488                                VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
    487489                                if (RT_SUCCESS(rc))
    488490                                {
     
    492494                                     * Collect a bit of info for the VM release log.
    493495                                     */
    494                                     pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
    495                                     pVM->vmm.s.fIsPreemptPossible         = RTThreadPreemptIsPossible();;
    496 
    497                                     VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     496                                    pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
      497                                    pGVM->vmm.s.fIsPreemptPossible         = RTThreadPreemptIsPossible();
     498
     499                                    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    498500                                    return rc;
    499501                                }
    500502
    501503                                /* bail out*/
    502                                 GIMR0TermVM(pVM);
     504                                GIMR0TermVM(pGVM);
    503505                            }
    504506#ifdef VBOX_WITH_PCI_PASSTHROUGH
    505                             PciRawR0TermVM(pGVM, pVM);
     507                            PciRawR0TermVM(pGVM);
    506508#endif
    507509                        }
     
    509511                }
    510512            }
    511             HMR0TermVM(pVM);
     513            HMR0TermVM(pGVM);
    512514        }
    513515    }
    514516
    515     RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
     517    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
    516518    return rc;
    517519}
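
vmmR0InitVM now receives only pGVM; the SVN revision and build type still travel packed in the request's u64Arg, as the VMMR0_DO_VMMR0_INIT case further down shows. A small sketch of the packing and unpacking (the RT_MAKE_U64 call on the ring-3 side is an assumption here; the unpacking is taken from the dispatch hunk):

    /* Ring-3 side (sketch): pack the two 32-bit values into one argument. */
    uint64_t const u64Arg = RT_MAKE_U64(uSvnRev /*lo*/, uBuildType /*hi*/);

    /* Ring-0 dispatcher side (as in the VMMR0_DO_VMMR0_INIT case below): */
    int rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
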
     
    523525 * @returns VBox status code.
    524526 * @param   pGVM        The ring-0 VM structure.
    525  * @param   pVM         The cross context VM structure.
    526527 * @param   idCpu       The EMT that's calling.
    527528 */
    528 static int vmmR0InitVMEmt(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     529static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
    529530{
    530531    /* Paranoia (caller checked these already). */
     
    541542        && !pR0Logger->fRegistered)
    542543    {
    543         RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
     544        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
    544545        pR0Logger->fRegistered = true;
    545546    }
    546547#endif
    547     RT_NOREF(pVM);
    548548
    549549    return VINF_SUCCESS;
     
    562562 *
    563563 * @param   pGVM        The global (ring-0) VM structure.
    564  * @param   pVM         The cross context VM structure.
    565564 * @param   idCpu       Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
    566565 *                      thread.
    567566 * @thread  EMT(0) or session clean up thread.
    568567 */
    569 VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
     568VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
    570569{
    571570    /*
     
    575574    {
    576575        AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
    577         int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
     576        int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    578577        if (RT_FAILURE(rc))
    579578            return rc;
     
    581580
    582581#ifdef VBOX_WITH_PCI_PASSTHROUGH
    583     PciRawR0TermVM(pGVM, pVM);
     582    PciRawR0TermVM(pGVM);
    584583#endif
    585584
     
    589588    if (GVMMR0DoingTermVM(pGVM))
    590589    {
    591         GIMR0TermVM(pVM);
    592 
    593         /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
     590        GIMR0TermVM(pGVM);
     591
     592        /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
    594593         *        here to make sure we don't leak any shared pages if we crash... */
    595594#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    596         PGMR0DynMapTermVM(pVM);
    597 #endif
    598         HMR0TermVM(pVM);
     595        PGMR0DynMapTermVM(pGVM);
     596#endif
     597        HMR0TermVM(pGVM);
    599598    }
    600599
     
    602601     * Deregister the logger.
    603602     */
    604     RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
     603    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
    605604    return VINF_SUCCESS;
    606605}
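
VMMR0TermVM keeps its two entry modes, both now passing just the GVM: EMT(0) reaches it through the VMMR0_DO_VMMR0_TERM request with idCpu 0, while the session cleanup thread calls it with NIL_VMCPUID and thereby skips the EMT validation. Sketch of the two call shapes (taken directly from the hunks in this changeset):

    int rc  = VMMR0TermVM(pGVM, 0 /*idCpu*/);      /* EMT(0), via VMMR0_DO_VMMR0_TERM */
    /* ... or, from the session cleanup thread: */
    int rc2 = VMMR0TermVM(pGVM, NIL_VMCPUID);      /* no EMT check in this mode */
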
     
    707706 * @returns VINF_SUCCESS or VINF_EM_HALT.
    708707 * @param   pGVM        The ring-0 VM structure.
    709  * @param   pVM         The cross context VM structure.
    710708 * @param   pGVCpu      The ring-0 virtual CPU structure.
    711  * @param   pVCpu       The cross context virtual CPU structure.
    712709 *
    713710 * @todo r=bird: All the blocking/waiting and EMT managment should move out of
     
    715712 *       parameters and statistics.
    716713 */
    717 static int vmmR0DoHalt(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu)
    718 {
    719     Assert(pVCpu == pGVCpu);
    720 
     714static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
     715{
    721716    /*
    722717     * Do spin stat historization.
    723718     */
    724     if (++pVCpu->vmm.s.cR0Halts & 0xff)
     719    if (++pGVCpu->vmm.s.cR0Halts & 0xff)
    725720    { /* likely */ }
    726     else if (pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3)
    727     {
    728         pVCpu->vmm.s.cR0HaltsSucceeded = 2;
    729         pVCpu->vmm.s.cR0HaltsToRing3   = 0;
     721    else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
     722    {
     723        pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
     724        pGVCpu->vmm.s.cR0HaltsToRing3   = 0;
    730725    }
    731726    else
    732727    {
    733         pVCpu->vmm.s.cR0HaltsSucceeded = 0;
    734         pVCpu->vmm.s.cR0HaltsToRing3   = 2;
     728        pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
     729        pGVCpu->vmm.s.cR0HaltsToRing3   = 2;
    735730    }
    736731
     
    750745     * Check preconditions.
    751746     */
    752     unsigned const             uMWait              = EMMonitorWaitIsActive(pVCpu);
    753     CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pVCpu);
    754     if (   pVCpu->vmm.s.fMayHaltInRing0
    755         && !TRPMHasTrap(pVCpu)
     747    unsigned const             uMWait              = EMMonitorWaitIsActive(pGVCpu);
     748    CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
     749    if (   pGVCpu->vmm.s.fMayHaltInRing0
     750        && !TRPMHasTrap(pGVCpu)
    756751        && (   enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
    757752            || uMWait > 1))
    758753    {
    759         if (   !VM_FF_IS_ANY_SET(pVM, fVmFFs)
    760             && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
     754        if (   !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
     755            && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
    761756        {
    762757            /*
    763758             * Interrupts pending already?
    764759             */
    765             if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
    766                 APICUpdatePendingInterrupts(pVCpu);
     760            if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
     761                APICUpdatePendingInterrupts(pGVCpu);
    767762
    768763            /*
     
    772767                                    | VMCPU_FF_INTERRUPT_NMI  | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
    773768
    774             if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
    775                 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
     769            if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
     770                return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
    776771            ASMNopPause();
    777772
     
    780775             */
    781776            uint64_t u64Delta;
    782             uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
    783 
    784             if (   !VM_FF_IS_ANY_SET(pVM, fVmFFs)
    785                 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
     777            uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
     778
     779            if (   !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
     780                && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
    786781            {
    787                 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
    788                     APICUpdatePendingInterrupts(pVCpu);
    789 
    790                 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
    791                     return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
     782                if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
     783                    APICUpdatePendingInterrupts(pGVCpu);
     784
     785                if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
     786                    return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
    792787
    793788                /*
    794789                 * Wait if there is enough time to the next timer event.
    795790                 */
    796                 if (u64Delta >= pVCpu->vmm.s.cNsSpinBlockThreshold)
     791                if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
    797792                {
    798793                    /* If there are few other CPU cores around, we will procrastinate a
     
    801796                       dynamically adjust the spin count according to its usfulness or
    802797                       something... */
    803                     if (   pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3
     798                    if (   pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
    804799                        && RTMpGetOnlineCount() >= 4)
    805800                    {
     
    810805                        {
    811806                            ASMNopPause();
    812                             if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
    813                                 APICUpdatePendingInterrupts(pVCpu);
     807                            if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
     808                                APICUpdatePendingInterrupts(pGVCpu);
    814809                            ASMNopPause();
    815                             if (VM_FF_IS_ANY_SET(pVM, fVmFFs))
     810                            if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
    816811                            {
    817                                 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
     812                                STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
    818813                                return VINF_EM_HALT;
    819814                            }
    820815                            ASMNopPause();
    821                             if (VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
     816                            if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
    822817                            {
    823                                 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
     818                                STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
    824819                                return VINF_EM_HALT;
    825820                            }
    826821                            ASMNopPause();
    827                             if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
     822                            if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
    828823                            {
    829                                 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromSpin);
    830                                 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
     824                                STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
     825                                return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
    831826                            }
    832827                            ASMNopPause();
     
    836831                    /* Block.  We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
    837832                       knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
    838                     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
     833                    VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
    839834                    uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
    840                     int rc = GVMMR0SchedHalt(pGVM, pVM, pGVCpu, u64GipTime);
     835                    int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
    841836                    uint64_t const u64EndSchedHalt     = RTTimeNanoTS();
    842837                    uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
    843                     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
    844                     STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
     838                    VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
     839                    STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
    845840                    if (   rc == VINF_SUCCESS
    846841                        || rc == VERR_INTERRUPTED)
     
    850845                        int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
    851846                        if (cNsOverslept > 50000)
    852                             STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
     847                            STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
    853848                        else if (cNsOverslept < -50000)
    854                             STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockInsomnia,  cNsElapsedSchedHalt);
     849                            STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia,  cNsElapsedSchedHalt);
    855850                        else
    856                             STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOnTime,    cNsElapsedSchedHalt);
     851                            STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime,    cNsElapsedSchedHalt);
    857852
    858853                        /*
    859854                         * Recheck whether we can resume execution or have to go to ring-3.
    860855                         */
    861                         if (   !VM_FF_IS_ANY_SET(pVM, fVmFFs)
    862                             && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
     856                        if (   !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
     857                            && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
    863858                        {
    864                             if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
    865                                 APICUpdatePendingInterrupts(pVCpu);
    866                             if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
     859                            if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
     860                                APICUpdatePendingInterrupts(pGVCpu);
     861                            if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
    867862                            {
    868                                 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromBlock);
    869                                 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
     863                                STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
     864                                return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
    870865                            }
    871866                        }
     
    10681063/**
    10691064 * Record return code statistics
    1070  * @param   pVM         The cross context VM structure.
      1065 * @param   pGVM        The global (ring-0) VM structure.
    10711066 * @param   pVCpu       The cross context virtual CPU structure.
    10721067 * @param   rc          The status code.
     
    12731268 *
    12741269 * @param   pGVM            The global (ring-0) VM structure.
    1275  * @param   pVM             The cross context VM structure.
    1276  *                          The return code is stored in pVM->vmm.s.iLastGZRc.
      1270 * @param   pVMIgnored      The cross context VM structure. The return code is
      1271 *                          stored in pGVM->aCpus[idCpu].vmm.s.iLastGZRc.
    12771272 * @param   idCpu           The Virtual CPU ID of the calling EMT.
    12781273 * @param   enmOperation    Which operation to execute.
    12791274 * @remarks Assume called with interrupts _enabled_.
    12801275 */
    1281 VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
    1282 {
     1276VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
     1277{
     1278    RT_NOREF(pVMIgnored);
     1279
    12831280    /*
    12841281     * Validation.
    12851282     */
    12861283    if (   idCpu < pGVM->cCpus
    1287         && pGVM->cCpus == pVM->cCpus)
     1284        && pGVM->cCpus == pGVM->cCpusUnsafe)
    12881285    { /*likely*/ }
    12891286    else
    12901287    {
    1291         SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
     1288        SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
    12921289        return;
    12931290    }
    12941291
    1295     PGVMCPU   pGVCpu = &pGVM->aCpus[idCpu];
    1296     PVMCPUCC  pVCpu  = pGVCpu;
     1292    PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
    12971293    RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
    1298     if (RT_LIKELY(   pGVCpu->hEMT           == hNativeThread
    1299                   && pVCpu->hNativeThreadR0 == hNativeThread))
     1294    if (RT_LIKELY(   pGVCpu->hEMT            == hNativeThread
     1295                  && pGVCpu->hNativeThreadR0 == hNativeThread))
    13001296    { /* likely */ }
    13011297    else
    13021298    {
    1303         SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
    1304                     idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
     1299        SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
     1300                    idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
    13051301        return;
    13061302    }
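
The fast path no longer cross-checks a separate pVM; instead the CPU count is validated against the cCpusUnsafe copy kept inside the GVM itself, and the calling thread is matched against the per-VCPU EMT handles. A hedged sketch of that validation (all identifiers appear in the hunk above; error reporting elided):

    if (   idCpu < pGVM->cCpus
        && pGVM->cCpus == pGVM->cCpusUnsafe)                       /* catches a corrupted or forged GVM */
    {
        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
        RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
        if (   pGVCpu->hEMT            == hNativeThread
            && pGVCpu->hNativeThreadR0 == hNativeThread)           /* only the owning EMT may enter */
        { /* proceed with the requested operation */ }
    }
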
     
    13101306     */
    13111307    VMM_CHECK_SMAP_SETUP();
    1312     VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1308    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    13131309
    13141310    /*
     
    13271323                 * Disable preemption.
    13281324                 */
    1329                 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
     1325                Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
    13301326                RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    13311327                RTThreadPreemptDisable(&PreemptState);
     
    13401336                              && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
    13411337                {
    1342                     pVCpu->iHostCpuSet = iHostCpuSet;
    1343                     ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
     1338                    pGVCpu->iHostCpuSet = iHostCpuSet;
     1339                    ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
    13441340
    13451341                    /*
    13461342                     * Update the periodic preemption timer if it's active.
    13471343                     */
    1348                     if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
    1349                         GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
    1350                     VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1344                    if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
     1345                        GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
     1346                    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    13511347
    13521348#ifdef VMM_R0_TOUCH_FPU
     
    13651361                         * Enable the context switching hook.
    13661362                         */
    1367                         if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
     1363                        if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
    13681364                        {
    1369                             Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
    1370                             int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
     1365                            Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
     1366                            int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
    13711367                        }
    13721368
     
    13741370                         * Enter HM context.
    13751371                         */
    1376                         rc = HMR0Enter(pVCpu);
     1372                        rc = HMR0Enter(pGVCpu);
    13771373                        if (RT_SUCCESS(rc))
    13781374                        {
    1379                             VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     1375                            VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
    13801376
    13811377                            /*
     
    13831379                             * we're in HM context.
    13841380                             */
    1385                             if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
     1381                            if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
    13861382                            {
    13871383                                fPreemptRestored = true;
     
    13921388                             * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
    13931389                             */
    1394                             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    1395                             rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
    1396                             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1390                            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
     1391                            rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
     1392                            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    13971393
    13981394                            /*
     
    14001396                             * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
    14011397                             */
    1402                             if (RT_UNLIKELY(   VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
     1398                            if (RT_UNLIKELY(   VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
    14031399                                            && RT_SUCCESS_NP(rc)  && rc !=  VINF_VMM_CALL_HOST ))
    14041400                            {
    1405                                 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
    1406                                 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
    1407                                             "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
     1401                                pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
     1402                                RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
     1403                                            "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
    14081404                                rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
    14091405                            }
    14101406                            /** @todo Get rid of this. HM shouldn't disable the context hook. */
    1411                             else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
     1407                            else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
    14121408                            {
    1413                                 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
    1414                                 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
    1415                                             "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
     1409                                pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
     1410                                RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
     1411                                            "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
    14161412                                rc = VERR_INVALID_STATE;
    14171413                            }
    14181414
    1419                             VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
     1415                            VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
    14201416                        }
    1421                         STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
     1417                        STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
    14221418
    14231419                        /*
     
    14251421                         * hook / restore preemption.
    14261422                         */
    1427                         pVCpu->iHostCpuSet = UINT32_MAX;
    1428                         ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
     1423                        pGVCpu->iHostCpuSet = UINT32_MAX;
     1424                        ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
    14291425
    14301426                        /*
     
    14351431                         *       when we get here, but the IPRT API handles that.
    14361432                         */
    1437                         if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
     1433                        if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
    14381434                        {
    1439                             ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
    1440                             RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
     1435                            ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
     1436                            RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
    14411437                        }
    14421438                    }
     
    14471443                    {
    14481444                        rc = VINF_EM_RAW_INTERRUPT;
    1449                         pVCpu->iHostCpuSet = UINT32_MAX;
    1450                         ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
     1445                        pGVCpu->iHostCpuSet = UINT32_MAX;
     1446                        ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
    14511447                    }
    14521448
     
    14561452                        RTThreadPreemptRestore(&PreemptState);
    14571453
    1458                     pVCpu->vmm.s.iLastGZRc = rc;
     1454                    pGVCpu->vmm.s.iLastGZRc = rc;
    14591455
    14601456                    /* Fire dtrace probe and collect statistics. */
    1461                     VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
     1457                    VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
    14621458#ifdef VBOX_WITH_STATISTICS
    1463                     vmmR0RecordRC(pVM, pVCpu, rc);
     1459                    vmmR0RecordRC(pGVM, pGVCpu, rc);
    14641460#endif
    14651461#if 1
     
    14711467                    else
    14721468                    {
    1473                         pVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu);
     1469                        pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
    14741470                        if (rc == VINF_SUCCESS)
    14751471                        {
    1476                             pVCpu->vmm.s.cR0HaltsSucceeded++;
     1472                            pGVCpu->vmm.s.cR0HaltsSucceeded++;
    14771473                            continue;
    14781474                        }
    1479                         pVCpu->vmm.s.cR0HaltsToRing3++;
     1475                        pGVCpu->vmm.s.cR0HaltsToRing3++;
    14801476                    }
    14811477#endif
     
    14861482                else
    14871483                {
    1488                     pVCpu->iHostCpuSet = UINT32_MAX;
    1489                     ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
     1484                    pGVCpu->iHostCpuSet = UINT32_MAX;
     1485                    ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
    14901486                    RTThreadPreemptRestore(&PreemptState);
    14911487                    if (iHostCpuSet < RTCPUSET_MAX_CPUS)
    14921488                    {
    1493                         int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
     1489                        int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
    14941490                                                                2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
    14951491                                                                0 /*default cTries*/);
    14961492                        if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
    1497                             pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
     1493                            pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
    14981494                        else
    1499                             pVCpu->vmm.s.iLastGZRc = rc;
     1495                            pGVCpu->vmm.s.iLastGZRc = rc;
    15001496                    }
    15011497                    else
    1502                         pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
     1498                        pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
    15031499                }
    15041500                break;
     
    15151511             * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
    15161512             */
    1517             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    1518             int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
    1519             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    1520             STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
    1521 
    1522             pVCpu->vmm.s.iLastGZRc = rc;
     1513            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
     1514#  ifdef VBOXSTRICTRC_STRICT_ENABLED
     1515            int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
     1516#  else
     1517            int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
     1518#  endif
     1519            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
     1520            STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
     1521
     1522            pGVCpu->vmm.s.iLastGZRc = rc;
    15231523
    15241524            /*
    15251525             * Fire dtrace probe and collect statistics.
    15261526             */
    1527             VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
     1527            VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
    15281528#  ifdef VBOX_WITH_STATISTICS
    1529             vmmR0RecordRC(pVM, pVCpu, rc);
     1529            vmmR0RecordRC(pGVM, pGVCpu, rc);
    15301530#  endif
    15311531            break;
     
    15381538         */
    15391539        case VMMR0_DO_NOP:
    1540             pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
     1540            pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
    15411541            break;
    15421542
     
    15461546        default:
    15471547            AssertMsgFailed(("%#x\n", enmOperation));
    1548             pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
    1549             break;
    1550     }
    1551     VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1548            pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
     1549            break;
     1550    }
     1551    VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    15521552}
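
Note that the exported signature of VMMR0EntryFast stays unchanged: the second parameter merely becomes pVMIgnored so existing ring-3 callers keep working, while the body uses pGVM/pGVCpu throughout. Sketch of an unchanged call site (the NOP operation is used here only because it appears in this hunk):

    VMMR0EntryFast(pGVM, pVM /* now ignored in ring-0 */, idCpu, VMMR0_DO_NOP);
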
    15531553
     
    15571557 *
    15581558 * @returns true / false accordingly.
    1559  * @param   pVM             The cross context VM structure.
     1559 * @param   pGVM            The global (ring-0) VM structure.
    15601560 * @param   pClaimedSession The session claim to validate.
    15611561 * @param   pSession        The session argument.
    15621562 */
    1563 DECLINLINE(bool) vmmR0IsValidSession(PVMCC pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
     1563DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
    15641564{
    15651565    /* This must be set! */
     
    15681568
    15691569    /* Only one out of the two. */
    1570     if (pVM && pClaimedSession)
     1570    if (pGVM && pClaimedSession)
    15711571        return false;
    1572     if (pVM)
    1573         pClaimedSession = pVM->pSession;
     1572    if (pGVM)
     1573        pClaimedSession = pGVM->pSession;
    15741574    return pClaimedSession == pSession;
    15751575}
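
vmmR0IsValidSession keeps its "exactly one of the two" contract, only the VM side is now the GVM pointer: if pGVM is given, its pSession is the claimed session; otherwise an explicit pClaimedSession must be supplied. Usage sketch:

    bool fOkFromVm    = vmmR0IsValidSession(pGVM, NULL,            pSession); /* claim taken from pGVM->pSession */
    bool fOkFromClaim = vmmR0IsValidSession(NULL, pClaimedSession, pSession); /* explicit claim, no VM involved  */
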
     
    15821582 * @returns VBox status code.
    15831583 * @param   pGVM            The global (ring-0) VM structure.
    1584  * @param   pVM             The cross context VM structure.
    15851584 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
    15861585 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
     
    15931592 * @remarks Assume called with interrupts _enabled_.
    15941593 */
    1595 static int vmmR0EntryExWorker(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
     1594static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
    15961595                              PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
    15971596{
    15981597    /*
    1599      * Validate pGVM, pVM and idCpu for consistency and validity.
    1600      */
    1601     if (   pGVM != NULL
    1602         || pVM  != NULL)
    1603     {
    1604         if (RT_LIKELY(   RT_VALID_PTR(pGVM)
    1605                       && RT_VALID_PTR(pVM)
    1606                       && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
     1598     * Validate pGVM and idCpu for consistency and validity.
     1599     */
     1600    if (pGVM != NULL)
     1601    {
     1602        if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
    16071603        { /* likely */ }
    16081604        else
    16091605        {
    1610             SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
     1606            SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
    16111607            return VERR_INVALID_POINTER;
    1612         }
    1613 
    1614         if (RT_LIKELY(pGVM == pVM))
    1615         { /* likely */ }
    1616         else
    1617         {
    1618             SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM/pVM=%p\n", pVM, pGVM);
    1619             return VERR_INVALID_PARAMETER;
    16201608        }
    16211609
     
    16281616        }
    16291617
    1630         if (RT_LIKELY(   pVM->enmVMState >= VMSTATE_CREATING
    1631                       && pVM->enmVMState <= VMSTATE_TERMINATED
    1632                       && pVM->cCpus      == pGVM->cCpus
    1633                       && pVM->pSession   == pSession
    1634                       && pVM->pSelf      == pVM))
     1618        if (RT_LIKELY(   pGVM->enmVMState >= VMSTATE_CREATING
     1619                      && pGVM->enmVMState <= VMSTATE_TERMINATED
     1620                      && pGVM->pSession   == pSession
     1621                      && pGVM->pSelf      == pGVM))
    16351622        { /* likely */ }
    16361623        else
    16371624        {
    1638             SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
    1639                         pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pSelf, pVM, enmOperation);
     1625            SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
     1626                        pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
    16401627            return VERR_INVALID_POINTER;
    16411628        }
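
vmmR0EntryExWorker's up-front validation shrinks to checks on the single pGVM pointer: page alignment, a plausible VM state, a session match and the pSelf back-pointer. A condensed sketch (AssertReturn is used here for brevity; the real code prints via SUPR0Printf and returns, as shown above):

    AssertReturn(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0,      VERR_INVALID_POINTER); /* page aligned */
    AssertReturn(   pGVM->enmVMState >= VMSTATE_CREATING
                 && pGVM->enmVMState <= VMSTATE_TERMINATED,      VERR_INVALID_POINTER); /* sane state */
    AssertReturn(pGVM->pSession == pSession,                     VERR_INVALID_POINTER); /* caller owns it */
    AssertReturn(pGVM->pSelf    == pGVM,                         VERR_INVALID_POINTER); /* self back-pointer */
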
     
    16651652         */
    16661653        case VMMR0_DO_GVMM_CREATE_VM:
    1667             if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
     1654            if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
    16681655                rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
    16691656            else
     
    16741661        case VMMR0_DO_GVMM_DESTROY_VM:
    16751662            if (pReqHdr == NULL && u64Arg == 0)
    1676                 rc = GVMMR0DestroyVM(pGVM, pVM);
     1663                rc = GVMMR0DestroyVM(pGVM);
    16771664            else
    16781665                rc = VERR_INVALID_PARAMETER;
     
    16811668
    16821669        case VMMR0_DO_GVMM_REGISTER_VMCPU:
    1683             if (pGVM != NULL && pVM != NULL)
    1684                 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
     1670            if (pGVM != NULL)
     1671                rc = GVMMR0RegisterVCpu(pGVM, idCpu);
    16851672            else
    16861673                rc = VERR_INVALID_PARAMETER;
    1687             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1674            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    16881675            break;
    16891676
    16901677        case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
    1691             if (pGVM != NULL && pVM != NULL)
    1692                 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
     1678            if (pGVM != NULL)
     1679                rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
    16931680            else
    16941681                rc = VERR_INVALID_PARAMETER;
    1695             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1682            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    16961683            break;
    16971684
     
    16991686            if (pReqHdr)
    17001687                return VERR_INVALID_PARAMETER;
    1701             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    1702             rc = GVMMR0SchedHaltReq(pGVM, pVM, idCpu, u64Arg);
    1703             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1688            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
     1689            rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
     1690            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17041691            break;
    17051692
     
    17071694            if (pReqHdr || u64Arg)
    17081695                return VERR_INVALID_PARAMETER;
    1709             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    1710             rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
    1711             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1696            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
     1697            rc = GVMMR0SchedWakeUp(pGVM, idCpu);
     1698            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17121699            break;
    17131700
     
    17151702            if (pReqHdr || u64Arg)
    17161703                return VERR_INVALID_PARAMETER;
    1717             rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
    1718             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1704            rc = GVMMR0SchedPoke(pGVM, idCpu);
     1705            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17191706            break;
    17201707
     
    17221709            if (u64Arg)
    17231710                return VERR_INVALID_PARAMETER;
    1724             rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
    1725             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1711            rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
     1712            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17261713            break;
    17271714
     
    17291716            if (pReqHdr || u64Arg > 1)
    17301717                return VERR_INVALID_PARAMETER;
    1731             rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
    1732             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1718            rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
     1719            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17331720            break;
    17341721
     
    17361723            if (u64Arg)
    17371724                return VERR_INVALID_PARAMETER;
    1738             rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
    1739             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1725            rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
     1726            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17401727            break;
    17411728
     
    17431730            if (u64Arg)
    17441731                return VERR_INVALID_PARAMETER;
    1745             rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
    1746             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1732            rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
     1733            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17471734            break;
    17481735
     
    17511738         */
    17521739        case VMMR0_DO_VMMR0_INIT:
    1753             rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
    1754             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1740            rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
     1741            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17551742            break;
    17561743
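
Every request case in the big switch follows the same reduced pattern after this change: validate the extra arguments, call the ring-0 worker with pGVM (plus idCpu or the request packet where applicable), then run the SMAP check against pGVM. A generic sketch of that per-case shape inside the switch (VMMR0_DO_SOMETHING, SomethingR0Req and PSOMETHINGREQ are placeholders, not real names):

    case VMMR0_DO_SOMETHING:                                       /* placeholder operation */
        if (u64Arg)                                                /* reject arguments the op does not take */
            return VERR_INVALID_PARAMETER;
        rc = SomethingR0Req(pGVM, idCpu, (PSOMETHINGREQ)pReqHdr);  /* hypothetical worker, pGVM only */
        VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
        break;
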
     
    17591746         */
    17601747        case VMMR0_DO_VMMR0_INIT_EMT:
    1761             rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
    1762             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1748            rc = vmmR0InitVMEmt(pGVM, idCpu);
     1749            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17631750            break;
    17641751
     
    17671754         */
    17681755        case VMMR0_DO_VMMR0_TERM:
    1769             rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
    1770             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1756            rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
     1757            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17711758            break;
    17721759
     
    17751762         */
    17761763        case VMMR0_DO_HM_ENABLE:
    1777             rc = HMR0EnableAllCpus(pVM);
    1778             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1764            rc = HMR0EnableAllCpus(pGVM);
     1765            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17791766            break;
    17801767
     
    17831770         */
    17841771        case VMMR0_DO_HM_SETUP_VM:
    1785             rc = HMR0SetupVM(pVM);
    1786             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1772            rc = HMR0SetupVM(pGVM);
     1773            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17871774            break;
    17881775
     
    17931780            if (idCpu == NIL_VMCPUID)
    17941781                return VERR_INVALID_CPU_ID;
    1795             rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
    1796             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1782            rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
     1783            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    17971784            break;
    17981785
     
    18001787            if (idCpu == NIL_VMCPUID)
    18011788                return VERR_INVALID_CPU_ID;
    1802             rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
    1803             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1789            rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
     1790            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18041791            break;
    18051792
     
    18071794            if (idCpu == NIL_VMCPUID)
    18081795                return VERR_INVALID_CPU_ID;
    1809             rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
    1810             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1796            rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
     1797            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18111798            break;
    18121799
     
    18141801            if (idCpu != 0)
    18151802                return VERR_INVALID_CPU_ID;
    1816             rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
    1817             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1803            rc = PGMR0PhysSetupIoMmu(pGVM);
     1804            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18181805            break;
    18191806
     
    18241811            if (u64Arg)
    18251812                return VERR_INVALID_PARAMETER;
    1826             rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
    1827             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1813            rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
     1814            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18281815            break;
    18291816
     
    18311818            if (u64Arg)
    18321819                return VERR_INVALID_PARAMETER;
    1833             rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
    1834             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1820            rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
     1821            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18351822            break;
    18361823
     
    18381825            if (u64Arg)
    18391826                return VERR_INVALID_PARAMETER;
    1840             rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
    1841             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1827            rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
     1828            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18421829            break;
    18431830
     
    18451832            if (u64Arg)
    18461833                return VERR_INVALID_PARAMETER;
    1847             rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
    1848             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1834            rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
     1835            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18491836            break;
    18501837
     
    18521839            if (u64Arg)
    18531840                return VERR_INVALID_PARAMETER;
    1854             rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
    1855             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1841            rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
     1842            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18561843            break;
    18571844
     
    18601847                return VERR_INVALID_PARAMETER;
    18611848            rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
    1862             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1849            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18631850            break;
    18641851
     
    18681855            if (u64Arg)
    18691856                return VERR_INVALID_PARAMETER;
    1870             rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
    1871             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1857            rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
     1858            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18721859            break;
    18731860
     
    18751862            if (u64Arg)
    18761863                return VERR_INVALID_PARAMETER;
    1877             rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
    1878             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1864            rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
     1865            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18791866            break;
    18801867
     
    18821869            if (u64Arg)
    18831870                return VERR_INVALID_PARAMETER;
    1884             rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
    1885             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1871            rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
     1872            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18861873            break;
    18871874
     
    18891876            if (pReqHdr)
    18901877                return VERR_INVALID_PARAMETER;
    1891             rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
    1892             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1878            rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
     1879            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    18931880            break;
    18941881
     
    18981885            if (u64Arg)
    18991886                return VERR_INVALID_PARAMETER;
    1900             rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
    1901             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1887            rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
     1888            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    19021889            break;
    19031890
     
    19071894            if (u64Arg)
    19081895                return VERR_INVALID_PARAMETER;
    1909             rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
    1910             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1896            rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
     1897            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    19111898            break;
    19121899
     
    19171904                ||  pReqHdr)
    19181905                return VERR_INVALID_PARAMETER;
    1919             rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
    1920             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1906            rc = GMMR0ResetSharedModules(pGVM, idCpu);
     1907            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    19211908            break;
    19221909
     
    19291916                ||  pReqHdr)
    19301917                return VERR_INVALID_PARAMETER;
    1931             rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
    1932             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1918            rc = GMMR0CheckSharedModules(pGVM, idCpu);
     1919            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    19331920            break;
    19341921        }
     
    19391926            if (u64Arg)
    19401927                return VERR_INVALID_PARAMETER;
    1941             rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
    1942             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1928            rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
     1929            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    19431930            break;
    19441931#endif
     
    19471934            if (u64Arg)
    19481935                return VERR_INVALID_PARAMETER;
    1949             rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
    1950             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1936            rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
     1937            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    19511938            break;
    19521939
     
    19541941            if (u64Arg)
    19551942                return VERR_INVALID_PARAMETER;
    1956             rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
    1957             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1943            rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
     1944            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    19581945            break;
    19591946
     
    19651952        case VMMR0_DO_GCFGM_QUERY_VALUE:
    19661953        {
    1967             if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
     1954            if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
    19681955                return VERR_INVALID_PARAMETER;
    19691956            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
     
    19821969                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
    19831970            }
    1984             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1971            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    19851972            break;
    19861973        }
     
    19931980            if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
    19941981                return VERR_INVALID_PARAMETER;
    1995             rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
    1996             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1982            rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
     1983            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    19971984            break;
    19981985        }
     
    20021989            if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
    20031990                return VERR_INVALID_PARAMETER;
    2004             rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
    2005             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1991            rc = PDMR0DeviceCallReqHandler(pGVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
     1992            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20061993            break;
    20071994        }
     
    20132000        {
    20142001            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
    2015             if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
     2002            if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
    20162003                return VERR_INVALID_PARAMETER;
    20172004            rc = IntNetR0OpenReq(pSession, pReq);
    2018             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2005            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20192006            break;
    20202007        }
    20212008
    20222009        case VMMR0_DO_INTNET_IF_CLOSE:
    2023             if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
     2010            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
    20242011                return VERR_INVALID_PARAMETER;
    20252012            rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
    2026             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2013            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20272014            break;
    20282015
    20292016
    20302017        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
    2031             if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
     2018            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
    20322019                return VERR_INVALID_PARAMETER;
    20332020            rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
    2034             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2021            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20352022            break;
    20362023
    20372024        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
    2038             if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
     2025            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
    20392026                return VERR_INVALID_PARAMETER;
    20402027            rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
    2041             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2028            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20422029            break;
    20432030
    20442031        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
    2045             if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
     2032            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
    20462033                return VERR_INVALID_PARAMETER;
    20472034            rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
    2048             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2035            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20492036            break;
    20502037
    20512038        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
    2052             if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
     2039            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
    20532040                return VERR_INVALID_PARAMETER;
    20542041            rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
    2055             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2042            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20562043            break;
    20572044
    20582045        case VMMR0_DO_INTNET_IF_SEND:
    2059             if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
     2046            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
    20602047                return VERR_INVALID_PARAMETER;
    20612048            rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
    2062             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2049            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20632050            break;
    20642051
    20652052        case VMMR0_DO_INTNET_IF_WAIT:
    2066             if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
     2053            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
    20672054                return VERR_INVALID_PARAMETER;
    20682055            rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
    2069             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2056            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20702057            break;
    20712058
    20722059        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
    2073             if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
     2060            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
    20742061                return VERR_INVALID_PARAMETER;
    20752062            rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
    2076             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2063            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20772064            break;
    20782065
     
    20822069         */
    20832070        case VMMR0_DO_PCIRAW_REQ:
    2084             if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
    2085                 return VERR_INVALID_PARAMETER;
    2086             rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
    2087             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2071            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
     2072                return VERR_INVALID_PARAMETER;
     2073            rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
     2074            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    20882075            break;
    20892076#endif
     
    20972084            if (u64Arg || pReqHdr || idCpu != 0)
    20982085                return VERR_INVALID_PARAMETER;
    2099             rc = NEMR0InitVM(pGVM, pVM);
    2100             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2086            rc = NEMR0InitVM(pGVM);
     2087            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21012088            break;
    21022089
     
    21042091            if (u64Arg || pReqHdr || idCpu != 0)
    21052092                return VERR_INVALID_PARAMETER;
    2106             rc = NEMR0InitVMPart2(pGVM, pVM);
    2107             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2093            rc = NEMR0InitVMPart2(pGVM);
     2094            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21082095            break;
    21092096
     
    21112098            if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
    21122099                return VERR_INVALID_PARAMETER;
    2113             rc = NEMR0MapPages(pGVM, pVM, idCpu);
    2114             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2100            rc = NEMR0MapPages(pGVM, idCpu);
     2101            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21152102            break;
    21162103
     
    21182105            if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
    21192106                return VERR_INVALID_PARAMETER;
    2120             rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
    2121             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2107            rc = NEMR0UnmapPages(pGVM, idCpu);
     2108            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21222109            break;
    21232110
     
    21252112            if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
    21262113                return VERR_INVALID_PARAMETER;
    2127             rc = NEMR0ExportState(pGVM, pVM, idCpu);
    2128             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2114            rc = NEMR0ExportState(pGVM, idCpu);
     2115            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21292116            break;
    21302117
     
    21322119            if (pReqHdr || idCpu == NIL_VMCPUID)
    21332120                return VERR_INVALID_PARAMETER;
    2134             rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
    2135             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2121            rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
     2122            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21362123            break;
    21372124
     
    21392126            if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
    21402127                return VERR_INVALID_PARAMETER;
    2141             rc = NEMR0QueryCpuTick(pGVM, pVM, idCpu);
    2142             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2128            rc = NEMR0QueryCpuTick(pGVM, idCpu);
     2129            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21432130            break;
    21442131
     
    21462133            if (pReqHdr || idCpu == NIL_VMCPUID)
    21472134                return VERR_INVALID_PARAMETER;
    2148             rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
    2149             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2135            rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
     2136            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21502137            break;
    21512138
     
    21532140            if (u64Arg || pReqHdr)
    21542141                return VERR_INVALID_PARAMETER;
    2155             rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
    2156             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2142            rc = NEMR0UpdateStatistics(pGVM, idCpu);
     2143            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21572144            break;
    21582145
     
    21612148            if (pReqHdr)
    21622149                return VERR_INVALID_PARAMETER;
    2163             rc = NEMR0DoExperiment(pGVM, pVM, idCpu, u64Arg);
    2164             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     2150            rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
     2151            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
    21652152            break;
    21662153#   endif
     
    22002187{
    22012188    PGVM                pGVM;
    2202     PVMCC               pVM;
    22032189    VMCPUID             idCpu;
    22042190    VMMR0OPERATION      enmOperation;
     
    22192205{
    22202206    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
    2221                               ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
    22222207                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
    22232208                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
     
    22512236    if (   pVM  != NULL
    22522237        && pGVM != NULL
     2238        && pVM  == pGVM /** @todo drop pGVM */
    22532239        && idCpu < pGVM->cCpus
    2254         && pVM->pSession == pSession
    2255         && pVM->pSelf != NULL)
     2240        && pGVM->pSession == pSession
     2241        && pGVM->pSelf    == pVM)
    22562242    {
    22572243        switch (enmOperation)
     
    22792265                    VMMR0ENTRYEXARGS Args;
    22802266                    Args.pGVM = pGVM;
    2281                     Args.pVM = pVM;
    22822267                    Args.idCpu = idCpu;
    22832268                    Args.enmOperation = enmOperation;
     
    22942279        }
    22952280    }
    2296     return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
     2281    return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
    22972282}
    22982283
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp

    r80333 r80346  
    17241724    }
    17251725#endif
    1726     return nemHCWinRunGC(pVM, pVCpu, NULL /*pGVM*/, NULL /*pGVCpu*/);
     1726    return nemHCWinRunGC(pVM, pVCpu);
    17271727}
    17281728