VirtualBox

Changeset 108761 in vbox


Timestamp: Mar 27, 2025 9:15:09 AM (3 weeks ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 168188
Message:

VMM/NEMR3Native-win-armv8.cpp: Some cleanup, get rid of unused memory mapping related code, bugref:10392

File:
1 edited

Legend:

Lines prefixed with '-' were removed, lines prefixed with '+' were added; unmodified context lines carry a single leading space.
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp

r108759 → r108761
 #endif

-#if 0 /* unused */
-/** WHV_MEMORY_ACCESS_TYPE names */
-static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
-#endif
-/** NEM_WIN_PAGE_STATE_XXX names. */
-NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
-#ifdef LOG_ENABLED
-/** HV_INTERCEPT_ACCESS_TYPE names. */
-static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
-#endif
-

 /*********************************************************************************************************************************
     
…
 DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);

-NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
-                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);

 /**
     
…

 /**
- * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
- * and nemHCWinHandleMemoryAccessPageCheckerCallback.
- */
-typedef struct NEMHCWINHMACPCCSTATE
-{
-    /** Input: Write access. */
-    bool    fWriteAccess;
-    /** Output: Set if we did something. */
-    bool    fDidSomething;
-    /** Output: Set it we should resume. */
-    bool    fCanResume;
-} NEMHCWINHMACPCCSTATE;
-
-/**
- * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
- *      Worker for nemR3WinHandleMemoryAccess; pvUser points to a
- *      NEMHCWINHMACPCCSTATE structure. }
- */
-NEM_TMPL_STATIC DECLCALLBACK(int)
-nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
-{
-    NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
-    pState->fDidSomething = false;
-    pState->fCanResume    = false;
-
-    /* If A20 is disabled, we may need to make another query on the masked
-       page to get the correct protection information. */
-    uint8_t  u2State = pInfo->u2NemState;
-    RTGCPHYS GCPhysSrc = GCPhys;
-
-    /*
-     * Consolidate current page state with actual page protection and access type.
-     * We don't really consider downgrades here, as they shouldn't happen.
-     */
-    int rc;
-    switch (u2State)
-    {
-        case NEM_WIN_PAGE_STATE_UNMAPPED:
-        case NEM_WIN_PAGE_STATE_NOT_SET:
-            if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
-            {
-                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
-                return VINF_SUCCESS;
-            }
-
-            /* Don't bother remapping it if it's a write request to a non-writable page. */
-            if (   pState->fWriteAccess
-                && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
-            {
-                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
-                return VINF_SUCCESS;
-            }
-
-            /* Map the page. */
-            rc = nemHCNativeSetPhysPage(pVM,
-                                        pVCpu,
-                                        GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
-                                        GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
-                                        pInfo->fNemProt,
-                                        &u2State,
-                                        true /*fBackingState*/);
-            pInfo->u2NemState = u2State;
-            Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
-                  GCPhys, g_apszPageStates[u2State], rc));
-            pState->fDidSomething = true;
-            pState->fCanResume    = true;
-            return rc;
-
-        case NEM_WIN_PAGE_STATE_READABLE:
-            if (   !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
-                && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
-            {
-                Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
-                return VINF_SUCCESS;
-            }
-
-            break;
-
-        case NEM_WIN_PAGE_STATE_WRITABLE:
-            if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
-            {
-                if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
-                    Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
-                else
-                {
-                    pState->fCanResume = true;
-                    Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
-                          GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
-                }
-                return VINF_SUCCESS;
-            }
-            break;
-
-        default:
-            AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
-    }
-
-    /*
-     * Unmap and restart the instruction.
-     * If this fails, which it does every so often, just unmap everything for now.
-     */
-    /** @todo figure out whether we mess up the state or if it's WHv.   */
-    STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
-    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
-    STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
-    if (SUCCEEDED(hrc))
-    {
-        pState->fDidSomething = true;
-        pState->fCanResume    = true;
-        pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
-        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
-        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
-        Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
-        return VINF_SUCCESS;
-    }
-    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
-    LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
-            GCPhys, g_apszPageStates[u2State], hrc, hrc));
-    return VERR_NEM_UNMAP_PAGES_FAILED;
-}
-
-
-/**
  * Returns the byte size from the given access SAS value.
  *
     
…

     /*
-     * Ask PGM for information about the given GCPhys.  We need to check if we're
-     * out of sync first.
+     * Emulate the memory access, either access handler or special memory.
      */
     WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
-    NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite, false, false };
-    PGMPHYSNEMPAGEINFO   Info;
-    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
-                                       nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
-    if (RT_SUCCESS(rc))
-    {
-        if (Info.fNemProt & (  pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
-                             ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
-        {
-            if (State.fCanResume)
-            {
-                Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
-                      pVCpu->idCpu, pHdr->Pc,
-                      pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
-                      Info.fHasHandlers ? " handlers" : "", Info.fZeroPage    ? " zero-pg" : "",
-                      State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
-                EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
-                                 pHdr->Pc, uHostTsc);
-                return VINF_SUCCESS;
-            }
-        }
-        Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
-              pVCpu->idCpu, pHdr->Pc,
-              pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
-              Info.fHasHandlers ? " handlers" : "", Info.fZeroPage    ? " zero-pg" : "",
-              State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
-    }
-    else
-        Log4(("MemExit/%u: %08RX64: %RGp rc=%Rrc%s; emulating (%s)\n",
-              pVCpu->idCpu, pHdr->Pc,
-              pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
-              g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
-
-    /*
-     * Emulate the memory access, either access handler or special memory.
-     */
     PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
                                               pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
     
…
     nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
     RT_NOREF_PV(pExitRec);
-    rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     AssertRCReturn(rc, rc);

     
…
                                                      uint8_t *pu2State, uint32_t *puNemRange)
 {
-    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
+    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
+
+    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
     *pu2State   = UINT8_MAX;
     *puNemRange = 0;
-
-#if 0 /* Let's not do this after all.  We'll protection change notifications for each page and if not we'll map them lazily. */
-    RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
-    for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
-    {
-        const void *pvPage;
-        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
-        if (RT_SUCCESS(rc))
-        {
-            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
-                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
-            if (SUCCEEDED(hrc))
-            { /* likely */ }
-            else
-            {
-                LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
-                        GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
-                return VERR_NEM_INIT_FAILED;
-            }
-        }
-        else
-        {
-            LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
-            return rc;
-        }
-    }
-    RT_NOREF_PV(fFlags);
-#else
-    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
-#endif
     return VINF_SUCCESS;
 }
     
…


-/**
- * Worker that maps pages into Hyper-V.
- *
- * This is used by the PGM physical page notifications as well as the memory
- * access VMEXIT handlers.
- *
- * @returns VBox status code.
- * @param   pVM             The cross context VM structure.
- * @param   pVCpu           The cross context virtual CPU structure of the
- *                          calling EMT.
- * @param   GCPhysSrc       The source page address.
- * @param   GCPhysDst       The hyper-V destination page.  This may differ from
- *                          GCPhysSrc when A20 is disabled.
- * @param   fPageProt       NEM_PAGE_PROT_XXX.
- * @param   pu2State        Our page state (input/output).
- * @param   fBackingChanged Set if the page backing is being changed.
- * @thread  EMT(pVCpu)
- */
-NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
-                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
-{
-    /*
-     * Looks like we need to unmap a page before we can change the backing
-     * or even modify the protection.  This is going to be *REALLY* efficient.
-     * PGM lends us two bits to keep track of the state here.
-     */
-    RT_NOREF(pVCpu);
-    uint8_t const u2OldState = *pu2State;
-    uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
-                             : fPageProt & NEM_PAGE_PROT_READ  ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
-    if (   fBackingChanged
-        || u2NewState != u2OldState)
-    {
-        if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
-        {
-            STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
-            HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
-            STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
-            if (SUCCEEDED(hrc))
-            {
-                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
-                STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
-                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
-                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
-                {
-                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
-                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
-                    return VINF_SUCCESS;
-                }
-            }
-            else
-            {
-                STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
-                LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
-                        GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
-                return VERR_NEM_INIT_FAILED;
-            }
-        }
-    }
-
-    /*
-     * Writeable mapping?
-     */
-    if (fPageProt & NEM_PAGE_PROT_WRITE)
-    {
-        void *pvPage;
-        int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
-        if (RT_SUCCESS(rc))
-        {
-            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
-                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
-            if (SUCCEEDED(hrc))
-            {
-                *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
-                STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
-                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
-                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
-                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
-                return VINF_SUCCESS;
-            }
-            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
-            LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
-                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
-            return VERR_NEM_INIT_FAILED;
-        }
-        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
-        return rc;
-    }
-
-    if (fPageProt & NEM_PAGE_PROT_READ)
-    {
-        const void *pvPage;
-        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
-        if (RT_SUCCESS(rc))
-        {
-            STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
-            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
-                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
-            STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
-            if (SUCCEEDED(hrc))
-            {
-                *pu2State = NEM_WIN_PAGE_STATE_READABLE;
-                STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
-                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
-                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
-                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
-                return VINF_SUCCESS;
-            }
-            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
-            LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
-                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
-            return VERR_NEM_INIT_FAILED;
-        }
-        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
-        return rc;
-    }
-
-    /* We already unmapped it above. */
-    *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
-    return VINF_SUCCESS;
-}
-
-
-NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
-{
-    if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
-    {
-        Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
-        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
-        return VINF_SUCCESS;
-    }
-
-    STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
-    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
-    STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
-    if (SUCCEEDED(hrc))
-    {
-        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
-        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
-        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
-        Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
-        return VINF_SUCCESS;
-    }
-    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
-    LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
-            GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
-    return VERR_NEM_IPE_6;
-}
-
-
 int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                        PGMPAGETYPE enmType, uint8_t *pu2State)
     
…
     Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
           GCPhys, HCPhys, fPageProt, enmType, *pu2State));
-    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
-
-    int rc;
-    RT_NOREF_PV(fPageProt);
-    rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
-    return rc;
+    RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
+
+    AssertFailed();
+    return VINF_SUCCESS;
 }

     
…
     Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
           GCPhys, HCPhys, fPageProt, enmType, *pu2State));
-    Assert(VM_IS_NEM_ENABLED(pVM));
-    RT_NOREF(HCPhys, enmType, pvR3);
-
-    RT_NOREF_PV(fPageProt);
-    nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
+    RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
 }

     
…
                                               RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
 {
-    Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
-          GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
-    Assert(VM_IS_NEM_ENABLED(pVM));
-    RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
-
-    RT_NOREF_PV(fPageProt);
-    nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
+    Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
+          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
+    RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
+
+    AssertFailed();
 }

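For reference, the helpers removed in this changeset were thin per-page wrappers around the Windows Hypervisor Platform calls WHvMapGpaRange and WHvUnmapGpaRange. The standalone sketch below (not code from the tree) illustrates that map/unmap pattern against a freshly created partition; the guest physical address 0x10000, the single-vCPU partition setup, and the reduced error handling are illustrative assumptions only.

/* Illustrative sketch only -- not VirtualBox code.  Build on Windows against
   WinHvPlatform.lib with the Windows Hypervisor Platform feature enabled. */
#include <windows.h>
#include <WinHvPlatform.h>
#include <stdio.h>

int main(void)
{
    /* Create and set up a minimal partition with one virtual processor. */
    WHV_PARTITION_HANDLE hPartition = NULL;
    HRESULT hrc = WHvCreatePartition(&hPartition);
    if (FAILED(hrc))
        return 1;

    WHV_PARTITION_PROPERTY Prop = {0};
    Prop.ProcessorCount = 1;
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Prop, sizeof(Prop));
    if (SUCCEEDED(hrc))
        hrc = WHvSetupPartition(hPartition);

    /* Back one guest page (GPA 0x10000, an arbitrary example address) with
       host memory and map it read+execute, the same flags the removed
       read-only/ROM path passed to WHvMapGpaRange. */
    void *pvPage = NULL;
    if (SUCCEEDED(hrc))
    {
        pvPage = VirtualAlloc(NULL, 4096, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
        hrc = pvPage
            ? WHvMapGpaRange(hPartition, pvPage, 0x10000 /*GPA*/, 4096,
                             WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute)
            : E_OUTOFMEMORY;
    }

    /* Tear the mapping down again, as the removed unmap helper did per page. */
    if (SUCCEEDED(hrc))
    {
        hrc = WHvUnmapGpaRange(hPartition, 0x10000 /*GPA*/, 4096);
        printf("map/unmap round trip: hrc=%#lx\n", (unsigned long)hrc);
    }

    if (pvPage)
        VirtualFree(pvPage, 0, MEM_RELEASE);
    WHvDeletePartition(hPartition);
    return SUCCEEDED(hrc) ? 0 : 1;
}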