VirtualBox

Changeset 13798 in vbox


Timestamp:
Nov 4, 2008 6:57:19 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
38787
Message:

VMM: Split out the switcher code from VMM.cpp and into VMMSwitcher.cpp.

Location:
trunk/src/VBox/VMM
Files:
3 edited
1 copied
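
In effect, the world-switcher machinery now sits behind two ring-3 internal entry points declared in VMMInternal.h (see the hunks below), roughly:

    /* Ring-3 only; replaces the former static vmmR3InitCoreCode() and the
       inline switcher-relocation loop in VMMR3Relocate(). */
    int  vmmR3SwitcherInit(PVM pVM);
    void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);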

  • trunk/src/VBox/VMM/Makefile.kmk

    r13699 r13798  
    9696        VMM.cpp \
    9797        VMMGuruMeditation.cpp \
     98        VMMSwitcher.cpp \
    9899        VMMTests.cpp \
    99100        HWACCM.cpp \
  • trunk/src/VBox/VMM/VMM.cpp

    r13796 r13798  
    9999
    100100/*******************************************************************************
    101 *   Global Variables                                                           *
    102 *******************************************************************************/
    103 /** Array of switcher definitions.
    104  * The type and index shall match!
    105  */
    106 static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
    107 {
    108     NULL, /* invalid entry */
    109 #ifndef RT_ARCH_AMD64
    110     &vmmR3Switcher32BitTo32Bit_Def,
    111     &vmmR3Switcher32BitToPAE_Def,
    112     NULL,   //&vmmR3Switcher32BitToAMD64_Def,
    113     &vmmR3SwitcherPAETo32Bit_Def,
    114     &vmmR3SwitcherPAEToPAE_Def,
    115     NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
    116 # ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    117     &vmmR3SwitcherAMD64ToPAE_Def,
    118 # else
    119     NULL,   //&vmmR3SwitcherAMD64ToPAE_Def,
    120 # endif
    121     NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
    122 #else  /* RT_ARCH_AMD64 */
    123     NULL,   //&vmmR3Switcher32BitTo32Bit_Def,
    124     NULL,   //&vmmR3Switcher32BitToPAE_Def,
    125     NULL,   //&vmmR3Switcher32BitToAMD64_Def,
    126     NULL,   //&vmmR3SwitcherPAETo32Bit_Def,
    127     NULL,   //&vmmR3SwitcherPAEToPAE_Def,
    128     NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
    129     &vmmR3SwitcherAMD64ToPAE_Def,
    130     NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
    131 #endif /* RT_ARCH_AMD64 */
    132 };
    133 
    134 
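For orientation, the VMMSWITCHERDEF fields this code relies on look roughly like this (an abridged sketch reconstructed from the uses in this changeset; the real definition lives in VMMInternal.h and differs in detail):

    typedef struct VMMSWITCHERDEF
    {
        void       *pvCode;     /* assembled switcher code template */
        void       *pvFixups;   /* fixup byte stream, FIX_THE_END terminated */
        const char *pszDesc;    /* human-readable name, used in logs */
        VMMSWITCHER enmType;    /* must equal the index in s_apSwitchers[] */
        uint32_t    cbCode;     /* size of the code in bytes */
        /* Entry point offsets into the code blob: */
        uint32_t    offR0HostToGuest, offGCGuestToHost, offGCCallTrampoline;
        uint32_t    offGCGuestToHostAsm, offGCGuestToHostAsmHyperCtx, offGCGuestToHostAsmGuestCtx;
        /* Code region descriptors (host-context, guest-context, identity-mapped): */
        uint32_t    offHCCode0, cbHCCode0, offHCCode1, cbHCCode1;
        uint32_t    offGCCode, cbGCCode;
        uint32_t    offIDCode0, cbIDCode0, offIDCode1, cbIDCode1;
        /* Per-switcher relocation callback, see vmmR3SwitcherGenericRelocate(). */
        DECLCALLBACKMEMBER(void, pfnRelocate)(PVM pVM, struct VMMSWITCHERDEF *pSwitcher, RTR0PTR R0PtrCode,
                                              uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode);
    } VMMSWITCHERDEF;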
    135 /*******************************************************************************
    136101*   Internal Functions                                                         *
    137102*******************************************************************************/
    138 static int                  vmmR3InitCoreCode(PVM pVM);
    139103static int                  vmmR3InitStacks(PVM pVM);
    140104static int                  vmmR3InitLoggers(PVM pVM);
     
    198162     * Init various sub-components.
    199163     */
    200     rc = vmmR3InitCoreCode(pVM);
     164    rc = vmmR3SwitcherInit(pVM);
    201165    if (RT_SUCCESS(rc))
    202166    {
     
    241205    }
    242206
    243     return rc;
    244 }
    245 
    246 
    247 /**
    248  * Initializes the per-VCPU VMM.
    249  *
    250  * @returns VBox status code.
    251  * @param   pVM         The VM to operate on.
    252  */
    253 VMMR3DECL(int) VMMR3InitCPU(PVM pVM)
    254 {
    255     LogFlow(("VMMR3InitCPU\n"));
    256     return VINF_SUCCESS;
    257 }
    258 
    259 
    260 /**
    261  * VMMR3Init worker that initializes the core code.
    262  *
    263  * This is per-VM core code which might need fixups and/or, for ease of use, is
    264  * put on linearly contiguous backing.
    265  *
    266  * @returns VBox status code.
    267  * @param   pVM     Pointer to the shared VM structure.
    268  */
    269 static int vmmR3InitCoreCode(PVM pVM)
    270 {
    271     /*
    272      * Calc the size.
    273      */
    274     unsigned cbCoreCode = 0;
    275     for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
    276     {
    277         pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
    278         PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
    279         if (pSwitcher)
    280         {
    281             AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
    282             cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
    283         }
    284     }
    285 
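RT_ALIGN_32() rounds up to a power-of-two boundary, so each switcher gets a 32-byte aligned slot with at least one guard byte; a worked example of the idiom:

    /* RT_ALIGN_32(u32, 32) == (u32 + 31) & ~31, e.g. for cbCode = 0x1a3:   */
    /*   RT_ALIGN_32(0x1a3 + 1, 32) = (0x1a4 + 0x1f) & ~0x1f = 0x1c0 bytes. */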
    286     /*
    287      * Allocate contiguous pages for switchers and deal with
    288      * conflicts in the intermediate mapping of the code.
    289      */
    290     pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
    291     pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    292     int rc = VERR_NO_MEMORY;
    293     if (pVM->vmm.s.pvCoreCodeR3)
    294     {
    295         rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
    296         if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
    297         {
    298             /* try more allocations - Solaris, Linux.  */
    299             const unsigned cTries = 8234;
    300             struct VMMInitBadTry
    301             {
    302                 RTR0PTR  pvR0;
    303                 void    *pvR3;
    304                 RTHCPHYS HCPhys;
    305                 RTUINT   cb;
    306             } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
    307             AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
    308             unsigned i = 0;
    309             do
    310             {
    311                 paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
    312                 paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
    313                 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
    314                 i++;
    315                 pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
    316                 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
    317                 pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    318                 if (!pVM->vmm.s.pvCoreCodeR3)
    319                     break;
    320                 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
    321             } while (   rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
    322                      && i < cTries - 1);
    323 
    324             /* cleanup */
    325             if (VBOX_FAILURE(rc))
    326             {
    327                 paBadTries[i].pvR3   = pVM->vmm.s.pvCoreCodeR3;
    328                 paBadTries[i].pvR0   = pVM->vmm.s.pvCoreCodeR0;
    329                 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
    330                 paBadTries[i].cb     = pVM->vmm.s.cbCoreCode;
    331                 i++;
    332                 LogRel(("Failed to allocate and map core code: rc=%Vrc\n", rc));
    333             }
    334             while (i-- > 0)
    335             {
    336                 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
    337                         i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
    338                 SUPContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
    339             }
    340             RTMemTmpFree(paBadTries);
    341         }
    342     }
    343     if (VBOX_SUCCESS(rc))
    344     {
    345         /*
    346          * Copy the code.
    347          */
    348         for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
    349         {
    350             PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
    351             if (pSwitcher)
    352                 memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
    353                        pSwitcher->pvCode, pSwitcher->cbCode);
    354         }
    355 
    356         /*
    357          * Map the code into the GC address space.
    358          */
    359         RTGCPTR GCPtr;
    360         rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &GCPtr);
    361         if (VBOX_SUCCESS(rc))
    362         {
    363             pVM->vmm.s.pvCoreCodeRC = GCPtr;
    364             MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
    365             LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VRv Phys=%VHp cb=%#x\n",
    366                     pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));
    367 
    368             /*
    370              * Finally, PGM has probably selected a switcher already, but we need
    371              * to get the routine addresses, so we'll reselect it.
    372              * This may legally fail, so we're ignoring the rc.
    372              */
    373             VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
    374             return rc;
    375         }
    376 
    377         /* bail out */
    378         AssertMsgFailed(("PGMR3Map(,%VRv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
    379         SUPContFree(pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
    380     }
    381     else
    382         VMSetError(pVM, rc, RT_SRC_POS,
    383                    N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
    384                    cbCoreCode);
    385 
    386     pVM->vmm.s.pvCoreCodeR3 = NULL;
    387     pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
    388     pVM->vmm.s.pvCoreCodeRC = 0;
    389207    return rc;
    390208}
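
The conflict-retry logic above reduces to a reusable pattern: hold on to every conflicting allocation so the allocator cannot hand the same physical range back, retry a bounded number of times, and free the whole held list afterwards regardless of the outcome. A minimal sketch with the VBox specifics stripped out (Alloc/Map/Free, the status codes and CTRIES are hypothetical stand-ins for SUPContAlloc2, PGMR3MapIntermediate, SUPContFree and cTries):

    void    *apHeld[CTRIES];         /* parked conflicting blocks */
    unsigned cHeld = 0;
    void    *pv = Alloc();
    int      rc = pv ? Map(pv) : ERR_NO_MEMORY;
    while (rc == ERR_CONFLICT && cHeld < CTRIES - 1)
    {
        apHeld[cHeld++] = pv;        /* keep it so the next Alloc() differs */
        pv = Alloc();
        rc = pv ? Map(pv) : ERR_NO_MEMORY;
    }
    if (rc != OK && pv)
        apHeld[cHeld++] = pv;        /* the final attempt failed too */
    while (cHeld-- > 0)
        Free(apHeld[cHeld]);         /* release the parked blocks either way */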
     
    547365
    548366/**
     367 * Initializes the per-VCPU VMM.
     368 *
     369 * @returns VBox status code.
     370 * @param   pVM         The VM to operate on.
     371 */
     372VMMR3DECL(int) VMMR3InitCPU(PVM pVM)
     373{
     374    LogFlow(("VMMR3InitCPU\n"));
     375    return VINF_SUCCESS;
     376}
     377
     378
     379/**
    549380 * Ring-3 init finalizing.
    550381 *
     
    811642     * All the switchers.
    812643     */
    813     for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
    814     {
    815         PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
    816         if (pSwitcher && pSwitcher->pfnRelocate)
    817         {
    818             unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
    819             pSwitcher->pfnRelocate(pVM,
    820                                    pSwitcher,
    821                                    pVM->vmm.s.pvCoreCodeR0 + off,
    822                                    (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
    823                                    pVM->vmm.s.pvCoreCodeRC + off,
    824                                    pVM->vmm.s.HCPhysCoreCode + off);
    825         }
    826     }
    827 
    828     /*
    829      * Recalc the RC address for the current switcher.
    830      */
    831     PVMMSWITCHERDEF pSwitcher   = s_apSwitchers[pVM->vmm.s.enmSwitcher];
    832     RTRCPTR         RCPtr       = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
    833     pVM->vmm.s.pfnGuestToHostRC         = RCPtr + pSwitcher->offGCGuestToHost;
    834     pVM->vmm.s.pfnCallTrampolineRC      = RCPtr + pSwitcher->offGCCallTrampoline;
    835     pVM->pfnVMMGCGuestToHostAsm         = RCPtr + pSwitcher->offGCGuestToHostAsm;
    836     pVM->pfnVMMGCGuestToHostAsmHyperCtx = RCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
    837     pVM->pfnVMMGCGuestToHostAsmGuestCtx = RCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
     644    vmmR3SwitcherRelocate(pVM, offDelta);
    838645
    839646    /*
     
    932739
    933740    return rc;
    934 }
    935 
    936 
    937 /**
    938  * Generic switch code relocator.
    939  *
    940  * @param   pVM         The VM handle.
    941  * @param   pSwitcher   The switcher definition.
    942  * @param   R0PtrCode   Pointer to the core code block for the switcher, ring-0 mapping.
    943  * @param   pu8CodeR3   Pointer to the core code block for the switcher, ring-3 mapping.
    944  * @param   GCPtrCode   The guest context address corresponding to pu8CodeR3.
    945  * @param   u32IDCode   The identity mapped (ID) address corresponding to pu8CodeR3.
    946  * @param   SelCS       The hypervisor CS selector.
    947  * @param   SelDS       The hypervisor DS selector.
    948  * @param   SelTSS      The hypervisor TSS selector.
    949  * @param   GCPtrGDT    The GC address of the hypervisor GDT.
    950  * @param   SelCS64     The 64-bit mode hypervisor CS selector.
    951  */
    952 static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
    953                                          RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
    954 {
    955     union
    956     {
    957         const uint8_t *pu8;
    958         const uint16_t *pu16;
    959         const uint32_t *pu32;
    960         const uint64_t *pu64;
    961         const void     *pv;
    962         uintptr_t       u;
    963     } u;
    964     u.pv = pSwitcher->pvFixups;
    965 
    966     /*
    967      * Process fixups.
    968      */
    969     uint8_t u8;
    970     while ((u8 = *u.pu8++) != FIX_THE_END)
    971     {
    972         /*
    973          * Get the source (where to write the fixup).
    974          */
    975         uint32_t offSrc = *u.pu32++;
    976         Assert(offSrc < pSwitcher->cbCode);
    977         union
    978         {
    979             uint8_t    *pu8;
    980             uint16_t   *pu16;
    981             uint32_t   *pu32;
    982             uint64_t   *pu64;
    983             uintptr_t   u;
    984         } uSrc;
    985         uSrc.pu8 = pu8CodeR3 + offSrc;
    986 
    987         /* The fixup target and method depends on the type. */
    988         switch (u8)
    989         {
    990             /*
    991              * 32-bit relative, source in HC and target in GC.
    992              */
    993             case FIX_HC_2_GC_NEAR_REL:
    994             {
    995                 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
    996                 uint32_t offTrg = *u.pu32++;
    997                 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
    998                 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
    999                 break;
    1000             }
    1001 
    1002             /*
    1003              * 32-bit relative, source in HC and target in ID.
    1004              */
    1005             case FIX_HC_2_ID_NEAR_REL:
    1006             {
    1007                 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
    1008                 uint32_t offTrg = *u.pu32++;
    1009                 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
    1010                 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (R0PtrCode + offSrc + 4));
    1011                 break;
    1012             }
    1013 
    1014             /*
    1015              * 32-bit relative, source in GC and target in HC.
    1016              */
    1017             case FIX_GC_2_HC_NEAR_REL:
    1018             {
    1019                 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
    1020                 uint32_t offTrg = *u.pu32++;
    1021                 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
    1022                 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (GCPtrCode + offSrc + 4));
    1023                 break;
    1024             }
    1025 
    1026             /*
    1027              * 32-bit relative, source in GC and target in ID.
    1028              */
    1029             case FIX_GC_2_ID_NEAR_REL:
    1030             {
    1031                 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
    1032                 uint32_t offTrg = *u.pu32++;
    1033                 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
    1034                 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
    1035                 break;
    1036             }
    1037 
    1038             /*
    1039              * 32-bit relative, source in ID and target in HC.
    1040              */
    1041             case FIX_ID_2_HC_NEAR_REL:
    1042             {
    1043                 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
    1044                 uint32_t offTrg = *u.pu32++;
    1045                 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
    1046                 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (u32IDCode + offSrc + 4));
    1047                 break;
    1048             }
    1049 
    1050             /*
    1051              * 32-bit relative, source in ID and target in GC.
    1052              */
    1053             case FIX_ID_2_GC_NEAR_REL:
    1054             {
    1055                 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
    1056                 uint32_t offTrg = *u.pu32++;
    1057                 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
    1058                 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
    1059                 break;
    1060             }
    1061 
    1062             /*
    1063              * 16:32 far jump, target in GC.
    1064              */
    1065             case FIX_GC_FAR32:
    1066             {
    1067                 uint32_t offTrg = *u.pu32++;
    1068                 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
    1069                 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
    1070                 *uSrc.pu16++ = SelCS;
    1071                 break;
    1072             }
    1073 
    1074             /*
    1075              * Make 32-bit GC pointer given CPUM offset.
    1076              */
    1077             case FIX_GC_CPUM_OFF:
    1078             {
    1079                 uint32_t offCPUM = *u.pu32++;
    1080                 Assert(offCPUM < sizeof(pVM->cpum));
    1081                 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
    1082                 break;
    1083             }
    1084 
    1085             /*
    1086              * Make 32-bit GC pointer given VM offset.
    1087              */
    1088             case FIX_GC_VM_OFF:
    1089             {
    1090                 uint32_t offVM = *u.pu32++;
    1091                 Assert(offVM < sizeof(VM));
    1092                 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
    1093                 break;
    1094             }
    1095 
    1096             /*
    1097              * Make 32-bit HC pointer given CPUM offset.
    1098              */
    1099             case FIX_HC_CPUM_OFF:
    1100             {
    1101                 uint32_t offCPUM = *u.pu32++;
    1102                 Assert(offCPUM < sizeof(pVM->cpum));
    1103                 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
    1104                 break;
    1105             }
    1106 
    1107             /*
    1108              * Make 32-bit R0 pointer given VM offset.
    1109              */
    1110             case FIX_HC_VM_OFF:
    1111             {
    1112                 uint32_t offVM = *u.pu32++;
    1113                 Assert(offVM < sizeof(VM));
    1114                 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
    1115                 break;
    1116             }
    1117 
    1118             /*
    1119              * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
    1120              */
    1121             case FIX_INTER_32BIT_CR3:
    1122             {
    1123 
    1124                 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
    1125                 break;
    1126             }
    1127 
    1128             /*
    1129              * Store the PAE CR3 (32-bit) for the intermediate memory context.
    1130              */
    1131             case FIX_INTER_PAE_CR3:
    1132             {
    1133 
    1134                 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
    1135                 break;
    1136             }
    1137 
    1138             /*
    1139              * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
    1140              */
    1141             case FIX_INTER_AMD64_CR3:
    1142             {
    1143 
    1144                 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
    1145                 break;
    1146             }
    1147 
    1148             /*
    1149              * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
    1150              */
    1151             case FIX_HYPER_32BIT_CR3:
    1152             {
    1153 
    1154                 *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
    1155                 break;
    1156             }
    1157 
    1158             /*
    1159              * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
    1160              */
    1161             case FIX_HYPER_PAE_CR3:
    1162             {
    1163 
    1164                 *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
    1165                 break;
    1166             }
    1167 
    1168             /*
    1169              * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
    1170              */
    1171             case FIX_HYPER_AMD64_CR3:
    1172             {
    1173 
    1174                 *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
    1175                 break;
    1176             }
    1177 
    1178             /*
    1179              * Store Hypervisor CS (16-bit).
    1180              */
    1181             case FIX_HYPER_CS:
    1182             {
    1183                 *uSrc.pu16 = SelCS;
    1184                 break;
    1185             }
    1186 
    1187             /*
    1188              * Store Hypervisor DS (16-bit).
    1189              */
    1190             case FIX_HYPER_DS:
    1191             {
    1192                 *uSrc.pu16 = SelDS;
    1193                 break;
    1194             }
    1195 
    1196             /*
    1197              * Store Hypervisor TSS (16-bit).
    1198              */
    1199             case FIX_HYPER_TSS:
    1200             {
    1201                 *uSrc.pu16 = SelTSS;
    1202                 break;
    1203             }
    1204 
    1205             /*
    1206              * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
    1207              */
    1208             case FIX_GC_TSS_GDTE_DW2:
    1209             {
    1210                 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
    1211                 *uSrc.pu32 = (uint32_t)GCPtr;
    1212                 break;
    1213             }
    1214 
    1215 
    1216             ///@todo case FIX_CR4_MASK:
    1217             ///@todo case FIX_CR4_OSFSXR:
    1218 
    1219             /*
    1220              * Insert relative jump to specified target if FXSAVE/FXRSTOR isn't supported by the CPU.
    1221              */
    1222             case FIX_NO_FXSAVE_JMP:
    1223             {
    1224                 uint32_t offTrg = *u.pu32++;
    1225                 Assert(offTrg < pSwitcher->cbCode);
    1226                 if (!CPUMSupportsFXSR(pVM))
    1227                 {
    1228                     *uSrc.pu8++ = 0xe9; /* jmp rel32 */
    1229                     *uSrc.pu32++ = offTrg - (offSrc + 5);
    1230                 }
    1231                 else
    1232                 {
    1233                     *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
    1234                     *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
    1235                 }
    1236                 break;
    1237             }
    1238 
    1239             /*
    1240              * Insert relative jump to specified target if SYSENTER isn't used by the host.
    1241              */
    1242             case FIX_NO_SYSENTER_JMP:
    1243             {
    1244                 uint32_t offTrg = *u.pu32++;
    1245                 Assert(offTrg < pSwitcher->cbCode);
    1246                 if (!CPUMIsHostUsingSysEnter(pVM))
    1247                 {
    1248                     *uSrc.pu8++ = 0xe9; /* jmp rel32 */
    1249                     *uSrc.pu32++ = offTrg - (offSrc + 5);
    1250                 }
    1251                 else
    1252                 {
    1253                     *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
    1254                     *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
    1255                 }
    1256                 break;
    1257             }
    1258 
    1259             /*
    1260              * Insert relative jump to specified target if SYSCALL isn't used by the host.
    1261              */
    1262             case FIX_NO_SYSCALL_JMP:
    1263             {
    1264                 uint32_t offTrg = *u.pu32++;
    1265                 Assert(offTrg < pSwitcher->cbCode);
    1266                 if (!CPUMIsHostUsingSysCall(pVM))
    1267                 {
    1268                     *uSrc.pu8++ = 0xe9; /* jmp rel32 */
    1269                     *uSrc.pu32++ = offTrg - (offSrc + 5);
    1270                 }
    1271                 else
    1272                 {
    1273                     *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
    1274                     *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
    1275                 }
    1276                 break;
    1277             }
    1278 
    1279             /*
    1280              * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
    1281              */
    1282             case FIX_HC_32BIT:
    1283             {
    1284                 uint32_t offTrg = *u.pu32++;
    1285                 Assert(offSrc < pSwitcher->cbCode);
    1286                 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
    1287                 *uSrc.pu32 = R0PtrCode + offTrg;
    1288                 break;
    1289             }
    1290 
    1291 #if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
    1292             /*
    1293              * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
    1294              */
    1295             case FIX_HC_64BIT:
    1296             {
    1297                 uint32_t offTrg = *u.pu32++;
    1298                 Assert(offSrc < pSwitcher->cbCode);
    1299                 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
    1300                 *uSrc.pu64 = R0PtrCode + offTrg;
    1301                 break;
    1302             }
    1303 
    1304             /*
    1305              * 64-bit HC Code Selector (no argument).
    1306              */
    1307             case FIX_HC_64BIT_CS:
    1308             {
    1309                 Assert(offSrc < pSwitcher->cbCode);
    1310 #if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
    1311                 *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
    1312 #else
    1313                 AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
    1314 #endif
    1315                 break;
    1316             }
    1317 
    1318             /*
    1319              * 64-bit HC pointer to the CPUM instance data (no argument).
    1320              */
    1321             case FIX_HC_64BIT_CPUM:
    1322             {
    1323                 Assert(offSrc < pSwitcher->cbCode);
    1324                 *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
    1325                 break;
    1326             }
    1327 #endif
    1328 
    1329             /*
    1330              * 32-bit ID pointer to (ID) target within the code (32-bit offset).
    1331              */
    1332             case FIX_ID_32BIT:
    1333             {
    1334                 uint32_t offTrg = *u.pu32++;
    1335                 Assert(offSrc < pSwitcher->cbCode);
    1336                 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
    1337                 *uSrc.pu32 = u32IDCode + offTrg;
    1338                 break;
    1339             }
    1340 
    1341             /*
    1342              * 64-bit ID pointer to (ID) target within the code (32-bit offset).
    1343              */
    1344             case FIX_ID_64BIT:
    1345             {
    1346                 uint32_t offTrg = *u.pu32++;
    1347                 Assert(offSrc < pSwitcher->cbCode);
    1348                 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
    1349                 *uSrc.pu64 = u32IDCode + offTrg;
    1350                 break;
    1351             }
    1352 
    1353             /*
    1354              * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
    1355              */
    1356             case FIX_ID_FAR32_TO_64BIT_MODE:
    1357             {
    1358                 uint32_t offTrg = *u.pu32++;
    1359                 Assert(offSrc < pSwitcher->cbCode);
    1360                 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
    1361                 *uSrc.pu32++ = u32IDCode + offTrg;
    1362                 *uSrc.pu16 = SelCS64;
    1363                 AssertRelease(SelCS64);
    1364                 break;
    1365             }
    1366 
    1367 #ifdef VBOX_WITH_NMI
    1368             /*
    1369              * 32-bit address to the APIC base.
    1370              */
    1371             case FIX_GC_APIC_BASE_32BIT:
    1372             {
    1373                 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
    1374                 break;
    1375             }
    1376 #endif
    1377 
    1378             default:
    1379                 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
    1380                 break;
    1381         }
    1382     }
    1383 
    1384 #ifdef LOG_ENABLED
    1385     /*
    1386      * If Log2 is enabled, disassemble the switcher code.
    1387      *
    1388      * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
    1389      */
    1390     if (LogIs2Enabled())
    1391     {
    1392         RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
    1393                     "   R0PtrCode   = %p\n"
    1394                     "   pu8CodeR3   = %p\n"
    1395                     "   GCPtrCode   = %VGv\n"
    1396                     "   u32IDCode   = %08x\n"
    1397                     "   pVMGC       = %VGv\n"
    1398                     "   pCPUMGC     = %VGv\n"
    1399                     "   pVMHC       = %p\n"
    1400                     "   pCPUMHC     = %p\n"
    1401                     "   GCPtrGDT    = %VGv\n"
    1402                     "   InterCR3s   = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
    1403                     "   HyperCR3s   = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
    1404                     "   SelCS       = %04x\n"
    1405                     "   SelDS       = %04x\n"
    1406                     "   SelCS64     = %04x\n"
    1407                     "   SelTSS      = %04x\n",
    1408                     pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
    1409                     R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
    1410                     VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
    1411                     GCPtrGDT,
    1412                     PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
    1413                     PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
    1414                     SelCS, SelDS, SelCS64, SelTSS);
    1415 
    1416         uint32_t offCode = 0;
    1417         while (offCode < pSwitcher->cbCode)
    1418         {
    1419             /*
    1420              * Figure out where this is.
    1421              */
    1422             const char *pszDesc = NULL;
    1423             RTUINTPTR   uBase;
    1424             uint32_t    cbCode;
    1425             if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
    1426             {
    1427                 pszDesc = "HCCode0";
    1428                 uBase   = R0PtrCode;
    1429                 offCode = pSwitcher->offHCCode0;
    1430                 cbCode  = pSwitcher->cbHCCode0;
    1431             }
    1432             else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
    1433             {
    1434                 pszDesc = "HCCode1";
    1435                 uBase   = R0PtrCode;
    1436                 offCode = pSwitcher->offHCCode1;
    1437                 cbCode  = pSwitcher->cbHCCode1;
    1438             }
    1439             else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
    1440             {
    1441                 pszDesc = "GCCode";
    1442                 uBase   = GCPtrCode;
    1443                 offCode = pSwitcher->offGCCode;
    1444                 cbCode  = pSwitcher->cbGCCode;
    1445             }
    1446             else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
    1447             {
    1448                 pszDesc = "IDCode0";
    1449                 uBase   = u32IDCode;
    1450                 offCode = pSwitcher->offIDCode0;
    1451                 cbCode  = pSwitcher->cbIDCode0;
    1452             }
    1453             else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
    1454             {
    1455                 pszDesc = "IDCode1";
    1456                 uBase   = u32IDCode;
    1457                 offCode = pSwitcher->offIDCode1;
    1458                 cbCode  = pSwitcher->cbIDCode1;
    1459             }
    1460             else
    1461             {
    1462                 RTLogPrintf("  %04x: %02x '%c' (nowhere)\n",
    1463                             offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
    1464                 offCode++;
    1465                 continue;
    1466             }
    1467 
    1468             /*
    1469              * Disassemble it.
    1470              */
    1471             RTLogPrintf("  %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
    1472             DISCPUSTATE Cpu;
    1473 
    1474             memset(&Cpu, 0, sizeof(Cpu));
    1475             Cpu.mode = CPUMODE_32BIT;
    1476             while (cbCode > 0)
    1477             {
    1478                 /* try label it */
    1479                 if (pSwitcher->offR0HostToGuest == offCode)
    1480                     RTLogPrintf(" *R0HostToGuest:\n");
    1481                 if (pSwitcher->offGCGuestToHost == offCode)
    1482                     RTLogPrintf(" *GCGuestToHost:\n");
    1483                 if (pSwitcher->offGCCallTrampoline == offCode)
    1484                     RTLogPrintf(" *GCCallTrampoline:\n");
    1485                 if (pSwitcher->offGCGuestToHostAsm == offCode)
    1486                     RTLogPrintf(" *GCGuestToHostAsm:\n");
    1487                 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
    1488                     RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
    1489                 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
    1490                     RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
    1491 
    1492                 /* disas */
    1493                 uint32_t cbInstr = 0;
    1494                 char szDisas[256];
    1495                 if (RT_SUCCESS(DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas)))
    1496                     RTLogPrintf("  %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
    1497                 else
    1498                 {
    1499                     RTLogPrintf("  %04x: %02x '%c'\n",
    1500                                 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
    1501                     cbInstr = 1;
    1502                 }
    1503                 offCode += cbInstr;
    1504                 cbCode -= RT_MIN(cbInstr, cbCode);
    1505             }
    1506         }
    1507     }
    1508 #endif
    1509 }
    1510 
    1511 
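The fixup stream walked by vmmR3SwitcherGenericRelocate() above is a flat byte stream emitted alongside the switcher assembly: one type byte, a 32-bit offset into the code blob where the fixup is written, and for most types a 32-bit target offset, terminated by FIX_THE_END. Illustratively (offsets invented for the example, U32() standing in for four little-endian offset bytes):

    FIX_HC_2_GC_NEAR_REL, U32(0x0012), U32(0x0080), /* rel32 at +0x12 -> GC code at +0x80 */
    FIX_HYPER_CS,         U32(0x0031),              /* 16-bit CS selector patched at +0x31 */
    FIX_GC_FAR32,         U32(0x0040), U32(0x0090), /* 16:32 far ptr: offset dword, then selector word */
    FIX_THE_END

For the NEAR_REL types the patched value is a standard x86 rel32, counted from the first byte after the 32-bit field, i.e. *uSrc.pu32 = (GCPtrCode + offTrg) - (uSrc.u + 4).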
    1512 /**
    1513  * Relocator for the 32-Bit to 32-Bit world switcher.
    1514  */
    1515 DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
    1516 {
    1517     vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
    1518                                  SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
    1519 }
    1520 
    1521 
    1522 /**
    1523  * Relocator for the 32-Bit to PAE world switcher.
    1524  */
    1525 DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
    1526 {
    1527     vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
    1528                                  SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
    1529 }
    1530 
    1531 
    1532 /**
    1533  * Relocator for the PAE to 32-Bit world switcher.
    1534  */
    1535 DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
    1536 {
    1537     vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
    1538                                  SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
    1539 }
    1540 
    1541 
    1542 /**
    1543  * Relocator for the PAE to PAE world switcher.
    1544  */
    1545 DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
    1546 {
    1547     vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
    1548                                  SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
    1549 }
    1550 
    1551 
    1552 /**
    1553  * Relocator for the AMD64 to PAE world switcher.
    1554  */
    1555 DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
    1556 {
    1557     vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
    1558                                  SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
    1559741}
    1560742
     
    1664846        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1665847    }
    1666     return VINF_SUCCESS;
    1667 }
    1668 
    1669 
    1670 /**
    1671  * Selects the switcher to be used for switching to GC.
    1672  *
    1673  * @returns VBox status code.
    1674  * @param   pVM             VM handle.
    1675  * @param   enmSwitcher     The new switcher.
    1676  * @remark  This function may be called before the VMM is initialized.
    1677  */
    1678 VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
    1679 {
    1680     /*
    1681      * Validate input.
    1682      */
    1683     if (    enmSwitcher < VMMSWITCHER_INVALID
    1684         ||  enmSwitcher >= VMMSWITCHER_MAX)
    1685     {
    1686         AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
    1687         return VERR_INVALID_PARAMETER;
    1688     }
    1689 
    1690     /* Do nothing if the switcher is disabled. */
    1691     if (pVM->vmm.s.fSwitcherDisabled)
    1692         return VINF_SUCCESS;
    1693 
    1694     /*
    1695      * Select the new switcher.
    1696      */
    1697     PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
    1698     if (pSwitcher)
    1699     {
    1700         Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
    1701         pVM->vmm.s.enmSwitcher = enmSwitcher;
    1702 
    1703         RTR0PTR     pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
    1704         pVM->vmm.s.pfnHostToGuestR0 = pbCodeR0 + pSwitcher->offR0HostToGuest;
    1705 
    1706         RTGCPTR     GCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
    1707         pVM->vmm.s.pfnGuestToHostRC         = GCPtr + pSwitcher->offGCGuestToHost;
    1708         pVM->vmm.s.pfnCallTrampolineRC      = GCPtr + pSwitcher->offGCCallTrampoline;
    1709         pVM->pfnVMMGCGuestToHostAsm         = GCPtr + pSwitcher->offGCGuestToHostAsm;
    1710         pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
    1711         pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
    1712         return VINF_SUCCESS;
    1713     }
    1714     return VERR_NOT_IMPLEMENTED;
    1715 }
    1716 
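Callers pick a switcher purely by the host/guest paging combination; the entry-point addresses then fall out of the offset arithmetic shown above. A hypothetical call site (the VMMSWITCHER_PAE_TO_PAE constant is assumed to match the vmmR3SwitcherPAEToPAE_Def slot):

    /* Hypothetical: ask for the PAE-host-to-PAE-guest world switcher. */
    int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_PAE_TO_PAE);
    AssertRC(rc);   /* VERR_NOT_IMPLEMENTED if that switcher isn't built in. */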
    1717 /**
    1718  * Disable the switcher logic permanently.
    1719  *
    1720  * @returns VBox status code.
    1721  * @param   pVM             VM handle.
    1722  */
    1723 VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
    1724 {
    1725 /** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
    1726  * @code
    1727  *       mov eax, VERR_INTERNAL_ERROR
    1728  *       ret
    1729  * @endcode
    1730  * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
    1731  */
    1732     pVM->vmm.s.fSwitcherDisabled = true;
    1733848    return VINF_SUCCESS;
    1734849}
  • trunk/src/VBox/VMM/VMMInternal.h

    r13796 r13798  
    423423__BEGIN_DECLS
    424424
     425#ifdef IN_RING3
     426int  vmmR3SwitcherInit(PVM pVM);
     427void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
     428#endif /* IN_RING3 */
    425429
    426430#ifdef IN_RING0
  • trunk/src/VBox/VMM/VMMSwitcher.cpp

    r13796 r13798  
    11/* $Id$ */
    22/** @file
    3  * VMM - The Virtual Machine Monitor Core.
     3 * VMM - The Virtual Machine Monitor, World Switcher(s).
    44 */
    55
     
    1818 * Clara, CA 95054 USA or visit http://www.sun.com if you need
    1919 * additional information or have any questions.
    20  */
    21 
    22 //#define NO_SUPCALLR0VMM
    23 
    24 /** @page pg_vmm        VMM - The Virtual Machine Monitor
    25  *
    26  * The VMM component is two things at the moment, it's a component doing a few
    27  * management and routing tasks, and it's the whole virtual machine monitor
    28  * thing.  For hysterical reasons, it is not doing all the management that one
    29  * would expect, this is instead done by @ref pg_vm.  We'll address this
    30  * misdesign eventually.
    31  *
    32  * @see grp_vmm, grp_vm
    33  *
    34  *
    35  * @section sec_vmmstate        VMM State
    36  *
    37  * @image html VM_Statechart_Diagram.gif
    38  *
    39  * To be written.
    40  *
    41  *
    42  * @subsection  subsec_vmm_init     VMM Initialization
    43  *
    44  * To be written.
    45  *
    46  *
    47  * @subsection  subsec_vmm_term     VMM Termination
    48  *
    49  * To be written.
    50  *
    5120 */
    5221
     
    133102
    134103
    135 /*******************************************************************************
    136 *   Internal Functions                                                         *
    137 *******************************************************************************/
    138 static int                  vmmR3InitCoreCode(PVM pVM);
    139 static int                  vmmR3InitStacks(PVM pVM);
    140 static int                  vmmR3InitLoggers(PVM pVM);
    141 static void                 vmmR3InitRegisterStats(PVM pVM);
    142 static DECLCALLBACK(int)    vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
    143 static DECLCALLBACK(int)    vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
    144 static DECLCALLBACK(void)   vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
    145 static int                  vmmR3ServiceCallHostRequest(PVM pVM);
    146 static DECLCALLBACK(void)   vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    147 
    148 
    149104/**
    150  * Initializes the VMM.
    151  *
    152  * @returns VBox status code.
    153  * @param   pVM         The VM to operate on.
    154  */
    155 VMMR3DECL(int) VMMR3Init(PVM pVM)
    156 {
    157     LogFlow(("VMMR3Init\n"));
    158 
    159     /*
    160      * Assert alignment, sizes and order.
    161      */
    162     AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
    163     AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
    164               ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
    165                sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));
    166 
    167     /*
    168      * Init basic VM VMM members.
    169      */
    170     pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
    171     int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
    172     if (rc == VERR_CFGM_VALUE_NOT_FOUND)
    173         pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
    174         //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
    175     else
    176         AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rc), rc);
    177 
    178     /* GC switchers are enabled by default. Turned off by HWACCM. */
    179     pVM->vmm.s.fSwitcherDisabled = false;
    180 
    181     /*
    182      * Register the saved state data unit.
    183      */
    184     rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
    185                                NULL, vmmR3Save, NULL,
    186                                NULL, vmmR3Load, NULL);
    187     if (VBOX_FAILURE(rc))
    188         return rc;
    189 
    190     /*
    191      * Register the Ring-0 VM handle with the session for fast ioctl calls.
    192      */
    193     rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
    194     if (VBOX_FAILURE(rc))
    195         return rc;
    196 
    197     /*
    198      * Init various sub-components.
    199      */
    200     rc = vmmR3InitCoreCode(pVM);
    201     if (RT_SUCCESS(rc))
    202     {
    203         rc = vmmR3InitStacks(pVM);
    204         if (RT_SUCCESS(rc))
    205         {
    206             rc = vmmR3InitLoggers(pVM);
    207 
    208 #ifdef VBOX_WITH_NMI
    209             /*
    210              * Allocate mapping for the host APIC.
    211              */
    212             if (RT_SUCCESS(rc))
    213             {
    214                 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
    215                 AssertRC(rc);
    216             }
    217 #endif
    218             if (RT_SUCCESS(rc))
    219             {
    220                 rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock);
    221                 if (VBOX_SUCCESS(rc))
    222                 {
    223                     /*
    224                      * Debug info and statistics.
    225                      */
    226                     DBGFR3InfoRegisterInternal(pVM, "ff", "Displays the current Forced actions Flags.", vmmR3InfoFF);
    227                     vmmR3InitRegisterStats(pVM);
    228 
    229                     return VINF_SUCCESS;
    230                 }
    231             }
    232         }
    233         /** @todo: Need failure cleanup. */
    234 
    235         //more todo in here?
    236         //if (VBOX_SUCCESS(rc))
    237         //{
    238         //}
    239         //int rc2 = vmmR3TermCoreCode(pVM);
    240         //AssertRC(rc2));
    241     }
    242 
    243     return rc;
    244 }
    245 
    246 
    247 /**
    248  * Initializes the per-VCPU VMM.
    249  *
    250  * @returns VBox status code.
    251  * @param   pVM         The VM to operate on.
    252  */
    253 VMMR3DECL(int) VMMR3InitCPU(PVM pVM)
    254 {
    255     LogFlow(("VMMR3InitCPU\n"));
    256     return VINF_SUCCESS;
    257 }
    258 
    259 
    260 /**
    261  * VMMR3Init worker that initializes the core code.
     105 * VMMR3Init worker that initializes the switcher code (aka core code).
    262106 *
    263107    * This is per-VM core code which might need fixups and/or, for ease of use, is
     
    267111 * @param   pVM     Pointer to the shared VM structure.
    268112 */
    269 static int vmmR3InitCoreCode(PVM pVM)
     113int vmmR3SwitcherInit(PVM pVM)
    270114{
    271115    /*
     
    392236
    393237/**
    394  * Allocate & setup the VMM RC stack(s) (for EMTs).
     238 * Relocate the switchers, called by VMMR3Relocate.
    395239 *
    396  * The stacks are also used for long jumps in Ring-0.
    397  *
    398  * @returns VBox status code.
    399  * @param   pVM     Pointer to the shared VM structure.
    400  *
    401  * @remarks The optional guard page gets it protection setup up during R3 init
    402  *          completion because of init order issues.
    403  */
    404 static int vmmR3InitStacks(PVM pVM)
    405 {
    406     /** @todo SMP: On stack per vCPU. */
    407 #ifdef VBOX_STRICT_VMM_STACK
    408     int rc = MMHyperAlloc(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbEMTStackR3);
    409 #else
    410     int rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbEMTStackR3);
    411 #endif
    412     if (VBOX_SUCCESS(rc))
    413     {
    414         pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = MMHyperR3ToR0(pVM, pVM->vmm.s.pbEMTStackR3);
    415         pVM->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3);
    416         pVM->vmm.s.pbEMTStackBottomRC = pVM->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
    417         AssertRelease(pVM->vmm.s.pbEMTStackRC);
    418 
    419         CPUMSetHyperESP(pVM, pVM->vmm.s.pbEMTStackBottomRC);
    420     }
    421 
    422     return rc;
    423 }
    424 
    425 
    426 /**
    427  * Initialize the loggers.
    428  *
    429  * @returns VBox status code.
    430240 * @param   pVM         Pointer to the shared VM structure.
    431  */
    432 static int vmmR3InitLoggers(PVM pVM)
    433 {
    434     int rc;
    435 
     241 * @param   offDelta    The relocation delta.
     242 */
     243void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
     244{
    436245    /*
    437      * Allocate RC & R0 Logger instances (they are finalized in the relocator).
    438      */
    439 #ifdef LOG_ENABLED
    440     PRTLOGGER pLogger = RTLogDefaultInstance();
    441     if (pLogger)
    442     {
    443         pVM->vmm.s.cbRCLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
    444         rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
    445         if (RT_FAILURE(rc))
    446             return rc;
    447         pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
    448 
    449 # ifdef VBOX_WITH_R0_LOGGING
    450         rc = MMHyperAlloc(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]),
    451                           0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0LoggerR3);
    452         if (RT_FAILURE(rc))
    453             return rc;
    454         pVM->vmm.s.pR0LoggerR3->pVM = pVM->pVMR0;
    455         //pVM->vmm.s.pR0LoggerR3->fCreated = false;
    456         pVM->vmm.s.pR0LoggerR3->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]);
    457         pVM->vmm.s.pR0LoggerR0 = MMHyperR3ToR0(pVM, pVM->vmm.s.pR0LoggerR3);
    458 # endif
    459     }
    460 #endif /* LOG_ENABLED */
    461 
    462 #ifdef VBOX_WITH_RC_RELEASE_LOGGING
    463     /*
    464      * Allocate RC release logger instances (finalized in the relocator).
    465      */
    466     PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
    467     if (pRelLogger)
    468     {
    469         pVM->vmm.s.cbRCRelLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
    470         rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
    471         if (RT_FAILURE(rc))
    472             return rc;
    473         pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
    474     }
    475 #endif /* VBOX_WITH_RC_RELEASE_LOGGING */
    476     return VINF_SUCCESS;
    477 }
    478 
    479 
    480 /**
    481  * VMMR3Init worker that register the statistics with STAM.
    482  *
    483  * @param   pVM         The shared VM structure.
    484  */
    485 static void vmmR3InitRegisterStats(PVM pVM)
    486 {
    487     /*
    488      * Statistics.
    489      */
    490     STAM_REG(pVM, &pVM->vmm.s.StatRunRC,                    STAMTYPE_COUNTER, "/VMM/RunRC",                     STAMUNIT_OCCURENCES, "Number of context switches.");
    491     STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal,              STAMTYPE_COUNTER, "/VMM/RZRet/Normal",              STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
    492     STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt,           STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt",           STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
    493     STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper,      STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper",      STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
    494     STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap,           STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap",           STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
    495     STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch,          STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch",          STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
    496     STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt,       STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt",       STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
    497     STAM_REG(pVM, &pVM->vmm.s.StatRZRetExceptionPrivilege,  STAMTYPE_COUNTER, "/VMM/RZRet/ExceptionPrivilege",  STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
    498     STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector,       STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector",       STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
    499     STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap,            STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap",            STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
    500     STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate,             STAMTYPE_COUNTER, "/VMM/RZRet/Emulate",             STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
    501     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate,        STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate",        STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
    502     STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead,              STAMTYPE_COUNTER, "/VMM/RZRet/IORead",              STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
    503     STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite,             STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite",             STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
    504     STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead,            STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead",            STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
    505     STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite,           STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite",           STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
    506     STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite,       STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite",       STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
    507     STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead,       STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead",       STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
    508     STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite,      STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite",      STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
    509     STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault,            STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
    510     STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault,            STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
    511     STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault,            STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
    512     STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault,            STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
    513     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPDFault,             STAMTYPE_COUNTER, "/VMM/RZRet/PDFault",             STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
    514     STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask,            STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask",            STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
    515     STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3,             STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR",              STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
    516     STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc,                STAMTYPE_COUNTER, "/VMM/RZRet/Misc",                STAMUNIT_OCCURENCES, "Number of misc returns.");
    517     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3,           STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3",           STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
    518     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF,             STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF",             STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
    519     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP,             STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP",             STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
    520     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ,        STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret",           STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
    521     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPageOverflow,        STAMTYPE_COUNTER, "/VMM/RZRet/InvlpgOverflow",      STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
    522     STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM,       STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM",         STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
    523     STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3,                STAMTYPE_COUNTER, "/VMM/RZRet/ToR3",                STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
    524     STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending,        STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending",        STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
    525     STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending,    STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending",    STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
    526     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn,     STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn",     STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
    527     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode,       STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode",       STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
    528     STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulHlt,             STAMTYPE_COUNTER, "/VMM/RZRet/EmulHlt",             STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
    529     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest,      STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest",      STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
    530 
    531     STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallHost,            STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc",             STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
    532     STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock,            STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock",          STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PDM_LOCK calls.");
    533     STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMQueueFlush,      STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMQueueFlush",    STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PDM_QUEUE_FLUSH calls.");
    534     STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMLock,            STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMLock",          STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_LOCK calls.");
    535     STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMPoolGrow,        STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMPoolGrow",      STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_POOL_GROW calls.");
    536     STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk,        STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk",      STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_MAP_CHUNK calls.");
    537     STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy,      STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy",    STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES calls.");
    538 #ifndef VBOX_WITH_NEW_PHYS_CODE
    539     STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMGrowRAM,         STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMGrowRAM",       STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_RAM_GROW_RANGE calls.");
    540 #endif
    541     STAM_REG(pVM, &pVM->vmm.s.StatRZCallRemReplay,          STAMTYPE_COUNTER, "/VMM/RZCallR3/REMReplay",        STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS calls.");
    542     STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush,           STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush",      STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VMM_LOGGER_FLUSH calls.");
    543     STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError,         STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError",       STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VM_SET_ERROR calls.");
    544     STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError,  STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError",   STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VM_SET_RUNTIME_ERROR calls.");
    545 }
    546 
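The table above only registers the counters under hierarchical STAM names; they are bumped elsewhere when the corresponding ring-0/RC status codes are handled. A hedged sketch of that consumer side (the handler name is invented; STAM_COUNTER_INC and the status codes are the real IPRT/VBox ones):

static int vmmR3SketchHandleRZReturn(PVM pVM, int rc)
{
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);    /* "/VMM/RZRet/Normal" */
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt); /* "/VMM/RZRet/Interrupt" */
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);      /* "/VMM/RZRet/Misc" */
            break;
    }
    return rc;
}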
    547 
    548 /**
    549  * Ring-3 init finalizing.
    550  *
    551  * @returns VBox status code.
    552  * @param   pVM         The VM handle.
    553  */
    554 VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
    555 {
    556 #ifdef VBOX_STRICT_VMM_STACK
    557     /*
    558      * Two inaccessible pages, one on each side of the stack, to catch over/under-flows.
    559      */
    560     memset(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
    561     PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE), PAGE_SIZE, 0);
    562     RTMemProtect(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
    563 
    564     memset(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
    565     PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE), PAGE_SIZE, 0);
    566     RTMemProtect(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
    567 #endif
    568 
    569     /*
    570      * Set page attributes to r/w for stack pages.
    571      */
    572     int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbEMTStackRC, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
    573     AssertRC(rc);
    574     if (VBOX_SUCCESS(rc))
    575     {
    576         /*
    577          * Create the EMT yield timer.
    578          */
    579         rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
    580         if (VBOX_SUCCESS(rc))
    581            rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
    582     }
    583 
    584 #ifdef VBOX_WITH_NMI
    585     /*
    586      * Map the host APIC into GC - This is AMD/Intel + Host OS specific!
    587      */
    588     if (VBOX_SUCCESS(rc))
    589         rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
    590                     X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
    591 #endif
    592     return rc;
    593 }
    594 
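The VBOX_STRICT_VMM_STACK block above brackets the EMT stack with two inaccessible, 0xcc-filled guard pages so an overflow or underflow faults immediately instead of corrupting neighbouring data. The same technique, sketched standalone with POSIX mmap/mprotect instead of RTMemProtect/PGMMapSetPage (names and layout are the sketch's own):

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

static void *sketchAllocGuardedStack(size_t cbStack, size_t cbPage)
{
    /* Reserve the stack plus one page on each side. */
    uint8_t *pb = mmap(NULL, cbStack + 2 * cbPage, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pb == MAP_FAILED)
        return NULL;
    memset(pb, 0xcc, cbPage);                           /* poison the guards... */
    memset(pb + cbPage + cbStack, 0xcc, cbPage);
    mprotect(pb, cbPage, PROT_NONE);                    /* ...then make them trap */
    mprotect(pb + cbPage + cbStack, cbPage, PROT_NONE);
    return pb + cbPage;                                 /* usable stack area */
}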
    595 
    596 /**
    597  * Initializes the R0 VMM.
    598  *
    599  * @returns VBox status code.
    600  * @param   pVM         The VM to operate on.
    601  */
    602 VMMR3DECL(int) VMMR3InitR0(PVM pVM)
    603 {
    604     int rc;
    605 
    606     /*
    607      * Initialize the ring-0 logger if we haven't done so yet.
    608      */
    609     if (    pVM->vmm.s.pR0LoggerR3
    610         &&  !pVM->vmm.s.pR0LoggerR3->fCreated)
    611     {
    612         rc = VMMR3UpdateLoggers(pVM);
    613         if (VBOX_FAILURE(rc))
    614             return rc;
    615     }
    616 
    617     /*
    618      * Call Ring-0 entry with init code.
    619      */
    620     for (;;)
    621     {
    622 #ifdef NO_SUPCALLR0VMM
    623         //rc = VERR_GENERAL_FAILURE;
    624         rc = VINF_SUCCESS;
    625 #else
    626         rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, VMMGetSvnRev(), NULL);
    627 #endif
    628         if (    pVM->vmm.s.pR0LoggerR3
    629             &&  pVM->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
    630             RTLogFlushToLogger(&pVM->vmm.s.pR0LoggerR3->Logger, NULL);
    631         if (rc != VINF_VMM_CALL_HOST)
    632             break;
    633         rc = vmmR3ServiceCallHostRequest(pVM);
    634         if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    635             break;
    636         /* Resume R0 */
    637     }
    638 
    639     if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    640     {
    641         LogRel(("R0 init failed, rc=%Vra\n", rc));
    642         if (VBOX_SUCCESS(rc))
    643             rc = VERR_INTERNAL_ERROR;
    644     }
    645     return rc;
    646 }
    647 
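VMMR3InitR0, like VMMR3Term and the run loops further down, follows one protocol: ring-0 longjmps back to ring-3 with VINF_VMM_CALL_HOST when it needs a service, ring-3 performs it via vmmR3ServiceCallHostRequest, and ring-0 is then resumed. A stripped-down sketch of that loop, assuming the regular VBox status codes and macros; the two extern helpers are invented stand-ins:

extern int sketchEnterRing0(void);          /* stand-in for SUPCallVMMR0Ex */
extern int sketchServiceHostRequest(void);  /* stand-in for vmmR3ServiceCallHostRequest */

static int sketchRing0ResumeLoop(void)
{
    int rc;
    for (;;)
    {
        rc = sketchEnterRing0();
        if (rc != VINF_VMM_CALL_HOST)
            break;                          /* a real status code: done */
        rc = sketchServiceHostRequest();
        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
            break;                          /* servicing failed or EM must run */
        /* otherwise re-enter ring-0 where it left off */
    }
    return rc;
}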
    648 
    649 /**
    650  * Initializes the RC VMM.
    651  *
    652  * @returns VBox status code.
    653  * @param   pVM         The VM to operate on.
    654  */
    655 VMMR3DECL(int) VMMR3InitRC(PVM pVM)
    656 {
    657     /* When hardware acceleration (VT-x/AMD-V) is used, the switcher is disabled and there's no need to init RC. */
    658     if (pVM->vmm.s.fSwitcherDisabled)
    659         return VINF_SUCCESS;
    660 
    661     /*
    662      * Call VMMGCInit():
    663      *      -# resolve the address.
    664      *      -# set up the stack frame and EIP to use the trampoline.
    665      *      -# do a generic hypervisor call.
    666      */
    667     RTGCPTR32 GCPtrEP;
    668     int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
    669     if (VBOX_SUCCESS(rc))
    670     {
    671         CPUMHyperSetCtxCore(pVM, NULL);
    672         CPUMSetHyperESP(pVM, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
    673         uint64_t u64TS = RTTimeProgramStartNanoTS();
    674         CPUMPushHyper(pVM, (uint32_t)(u64TS >> 32));    /* Param 3: The program startup TS - Hi. */
    675         CPUMPushHyper(pVM, (uint32_t)u64TS);            /* Param 3: The program startup TS - Lo. */
    676         CPUMPushHyper(pVM, VMMGetSvnRev());             /* Param 2: Version argument. */
    677         CPUMPushHyper(pVM, VMMGC_DO_VMMGC_INIT);        /* Param 1: Operation. */
    678         CPUMPushHyper(pVM, pVM->pVMRC);                 /* Param 0: pVM */
    679         CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR32));      /* trampoline param: stacksize.  */
    680         CPUMPushHyper(pVM, GCPtrEP);                    /* Call EIP. */
    681         CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnCallTrampolineRC);
    682 
    683         for (;;)
    684         {
    685 #ifdef NO_SUPCALLR0VMM
    686             //rc = VERR_GENERAL_FAILURE;
    687             rc = VINF_SUCCESS;
    688 #else
    689             rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_CALL_HYPERVISOR, NULL);
    690 #endif
    691 #ifdef LOG_ENABLED
    692             PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
    693             if (    pLogger
    694                 &&  pLogger->offScratch > 0)
    695                 RTLogFlushRC(NULL, pLogger);
    696 #endif
    697 #ifdef VBOX_WITH_RC_RELEASE_LOGGING
    698             PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
    699             if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
    700                 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
    701 #endif
    702             if (rc != VINF_VMM_CALL_HOST)
    703                 break;
    704             rc = vmmR3ServiceCallHostRequest(pVM);
    705             if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    706                 break;
    707         }
    708 
    709         if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    710         {
    711             VMMR3FatalDump(pVM, rc);
    712             if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    713                 rc = VERR_INTERNAL_ERROR;
    714         }
    715         AssertRC(rc);
    716     }
    717     return rc;
    718 }
    719 
    720 
    721 /**
    722  * Terminate the VMM bits.
    723  *
    724  * @returns VINF_SUCCESS.
    725  * @param   pVM         The VM handle.
    726  */
    727 VMMR3DECL(int) VMMR3Term(PVM pVM)
    728 {
    729     /*
    730      * Call Ring-0 entry with termination code.
    731      */
    732     int rc;
    733     for (;;)
    734     {
    735 #ifdef NO_SUPCALLR0VMM
    736         //rc = VERR_GENERAL_FAILURE;
    737         rc = VINF_SUCCESS;
    738 #else
    739         rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_TERM, 0, NULL);
    740 #endif
    741         if (    pVM->vmm.s.pR0LoggerR3
    742             &&  pVM->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
    743             RTLogFlushToLogger(&pVM->vmm.s.pR0LoggerR3->Logger, NULL);
    744         if (rc != VINF_VMM_CALL_HOST)
    745             break;
    746         rc = vmmR3ServiceCallHostRequest(pVM);
    747         if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    748             break;
    749         /* Resume R0 */
    750     }
    751     if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    752     {
    753         LogRel(("VMMR3Term: R0 term failed, rc=%Vra. (warning)\n", rc));
    754         if (VBOX_SUCCESS(rc))
    755             rc = VERR_INTERNAL_ERROR;
    756     }
    757 
    758 #ifdef VBOX_STRICT_VMM_STACK
    759     /*
    760      * Make the two stack guard pages present again.
    761      */
    762     RTMemProtect(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE,      PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    763     RTMemProtect(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    764 #endif
    765     return rc;
    766 }
    767 
    768 
    769 /**
    770  * Terminates the per-VCPU VMM.
    771  *
    772  * Termination means cleaning up and freeing all resources;
    773  * the VM itself is at this point powered off or suspended.
    774  *
    775  * @returns VBox status code.
    776  * @param   pVM         The VM to operate on.
    777  */
    778 VMMR3DECL(int) VMMR3TermCPU(PVM pVM)
    779 {
    780     return VINF_SUCCESS;
    781 }
    782 
    783 
    784 /**
    785  * Applies relocations to data and code managed by this
    786  * component. This function will be called at init and
    787  * whenever the VMM needs to relocate itself inside the GC.
    788  *
    789  * The VMM will need to apply relocations to the core code.
    790  *
    791  * @param   pVM         The VM handle.
    792  * @param   offDelta    The relocation delta.
    793  */
    794 VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
    795 {
    796     LogFlow(("VMMR3Relocate: offDelta=%VGv\n", offDelta));
    797 
    798     /*
    799      * Recalc the RC address.
    800      */
    801     pVM->vmm.s.pvCoreCodeRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pvCoreCodeR3);
    802 
    803     /*
    804      * The stack.
    805      */
    806     CPUMSetHyperESP(pVM, CPUMGetHyperESP(pVM) + offDelta);
    807     pVM->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3);
    808     pVM->vmm.s.pbEMTStackBottomRC = pVM->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
    809 
    810     /*
    811      * All the switchers.
     246     * Relocate all the switchers.
    812247     */
    813248    for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
     
    837272    pVM->pfnVMMGCGuestToHostAsmGuestCtx = RCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
    838273
    839     /*
    840      * Get other RC entry points.
    841      */
    842     int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
    843     AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Vra\n", rc));
    844 
    845     rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
    846     AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Vra\n", rc));
    847 
    848     /*
    849      * Update the logger.
    850      */
    851     VMMR3UpdateLoggers(pVM);
    852274}
    853275
    854276
    855277/**
    856  * Updates the settings for the RC and R0 loggers.
    857  *
    858  * @returns VBox status code.
    859  * @param   pVM     The VM handle.
    860  */
    861 VMMR3DECL(int)  VMMR3UpdateLoggers(PVM pVM)
    862 {
    863     /*
    864      * Simply clone the logger instance (for RC).
    865      */
    866     int rc = VINF_SUCCESS;
    867     RTRCPTR RCPtrLoggerFlush = 0;
    868 
    869     if (pVM->vmm.s.pRCLoggerR3
    870 #ifdef VBOX_WITH_RC_RELEASE_LOGGING
    871         || pVM->vmm.s.pRCRelLoggerR3
    872 #endif
    873        )
    874     {
    875         rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &RCPtrLoggerFlush);
    876         AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Vra\n", rc));
    877     }
    878 
    879     if (pVM->vmm.s.pRCLoggerR3)
    880     {
    881         RTRCPTR RCPtrLoggerWrapper = 0;
    882         rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &RCPtrLoggerWrapper);
    883         AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Vra\n", rc));
    884 
    885         pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
    886         rc = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pRCLoggerR3, pVM->vmm.s.cbRCLogger,
    887                           RCPtrLoggerWrapper,  RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
    888         AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Vra\n", rc));
    889     }
    890 
    891 #ifdef VBOX_WITH_RC_RELEASE_LOGGING
    892     if (pVM->vmm.s.pRCRelLoggerR3)
    893     {
    894         RTRCPTR RCPtrLoggerWrapper = 0;
    895         rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &RCPtrLoggerWrapper);
    896         AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Vra\n", rc));
    897 
    898         pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
    899         rc = RTLogCloneRC(RTLogRelDefaultInstance(), pVM->vmm.s.pRCRelLoggerR3, pVM->vmm.s.cbRCRelLogger,
    900                           RCPtrLoggerWrapper,  RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
    901         AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Vra\n", rc));
    902     }
    903 #endif /* VBOX_WITH_RC_RELEASE_LOGGING */
    904 
    905     /*
    906      * For the ring-0 EMT logger, we use a per-thread logger instance
    907      * in ring-0. Only initialize it once.
    908      */
    909     PVMMR0LOGGER pR0LoggerR3 = pVM->vmm.s.pR0LoggerR3;
    910     if (pR0LoggerR3)
    911     {
    912         if (!pR0LoggerR3->fCreated)
    913         {
    914             RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
    915             rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
    916             AssertReleaseMsgRCReturn(rc, ("VMMLoggerWrapper not found! rc=%Vra\n", rc), rc);
    917 
    918             RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
    919             rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
    920             AssertReleaseMsgRCReturn(rc, ("VMMLoggerFlush not found! rc=%Vra\n", rc), rc);
    921 
    922             rc = RTLogCreateForR0(&pR0LoggerR3->Logger, pR0LoggerR3->cbLogger,
    923                                   *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush,
    924                                   RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
    925             AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Vra\n", rc), rc);
    926             pR0LoggerR3->fCreated = true;
    927         }
    928 
    929         rc = RTLogCopyGroupsAndFlags(&pR0LoggerR3->Logger, NULL /* default */, pVM->vmm.s.pRCLoggerR3->fFlags, RTLOGFLAGS_BUFFERED);
    930         AssertRC(rc);
    931     }
    932 
    933     return rc;
    934 }
    935 
    936 
    937 /**
    938  * Generic switch code relocator.
     278 * Generic switcher code relocator.
    939279 *
    940280 * @param   pVM         The VM handle.
     
    1561901
    1562902/**
    1563  * Gets the pointer to g_szRTAssertMsg1 in GC.
    1564  * @returns Pointer to VMMGC::g_szRTAssertMsg1.
    1565  *          Returns NULL if not present.
    1566  * @param   pVM         The VM handle.
    1567  */
    1568 VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM)
    1569 {
    1570     RTGCPTR32 GCPtr;
    1571     int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg1", &GCPtr);
    1572     if (VBOX_SUCCESS(rc))
    1573         return (const char *)MMHyperGC2HC(pVM, GCPtr);
    1574     return NULL;
    1575 }
    1576 
    1577 
    1578 /**
    1579  * Gets the pointer to g_szRTAssertMsg2 in GC.
    1580  * @returns Pointer to VMMGC::g_szRTAssertMsg2.
    1581  *          Returns NULL if not present.
    1582  * @param   pVM         The VM handle.
    1583  */
    1584 VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM)
    1585 {
    1586     RTGCPTR32 GCPtr;
    1587     int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg2", &GCPtr);
    1588     if (VBOX_SUCCESS(rc))
    1589         return (const char *)MMHyperGC2HC(pVM, GCPtr);
    1590     return NULL;
    1591 }
    1592 
    1593 
    1594 /**
    1595  * Execute state save operation.
    1596  *
    1597  * @returns VBox status code.
    1598  * @param   pVM             VM Handle.
    1599  * @param   pSSM            SSM operation handle.
    1600  */
    1601 static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
    1602 {
    1603     LogFlow(("vmmR3Save:\n"));
    1604 
    1605     /*
    1606      * The hypervisor stack.
    1607      * Note! See the note in vmmR3Load.
    1608      */
    1609     SSMR3PutRCPtr(pSSM, pVM->vmm.s.pbEMTStackBottomRC);
    1610     RTRCPTR RCPtrESP = CPUMGetHyperESP(pVM);
    1611     AssertMsg(pVM->vmm.s.pbEMTStackBottomRC - RCPtrESP <= VMM_STACK_SIZE, ("Bottom %RRv ESP=%RRv\n", pVM->vmm.s.pbEMTStackBottomRC, RCPtrESP));
    1612     SSMR3PutRCPtr(pSSM, RCPtrESP);
    1613     SSMR3PutMem(pSSM, pVM->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
    1614     return SSMR3PutU32(pSSM, ~0); /* terminator */
    1615 }
    1616 
    1617 
    1618 /**
    1619  * Execute state load operation.
    1620  *
    1621  * @returns VBox status code.
    1622  * @param   pVM             VM Handle.
    1623  * @param   pSSM            SSM operation handle.
    1624  * @param   u32Version      Data layout version.
    1625  */
    1626 static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
    1627 {
    1628     LogFlow(("vmmR3Load:\n"));
    1629 
    1630     /*
    1631      * Validate version.
    1632      */
    1633     if (u32Version != VMM_SAVED_STATE_VERSION)
    1634     {
    1635         AssertMsgFailed(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
    1636         return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    1637     }
    1638 
    1639     /*
    1640      * Check that the stack is in the same place, or that it's fairly empty.
    1641      *
    1642      * Note! This can be skipped next time we update saved state as we will
    1643      *       never be in an R0/RC -> ring-3 call when saving the state. The
    1644      *       stack and the two associated pointers are not required.
    1645      */
    1646     RTRCPTR RCPtrStackBottom;
    1647     SSMR3GetRCPtr(pSSM, &RCPtrStackBottom);
    1648     RTRCPTR RCPtrESP;
    1649     int rc = SSMR3GetRCPtr(pSSM, &RCPtrESP);
    1650     if (VBOX_FAILURE(rc))
    1651         return rc;
    1652 
    1653     /* restore the stack.  */
    1654     SSMR3GetMem(pSSM, pVM->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
    1655 
    1656     /* terminator */
    1657     uint32_t u32;
    1658     rc = SSMR3GetU32(pSSM, &u32);
    1659     if (VBOX_FAILURE(rc))
    1660         return rc;
    1661     if (u32 != ~0U)
    1662     {
    1663         AssertMsgFailed(("u32=%#x\n", u32));
    1664         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1665     }
    1666     return VINF_SUCCESS;
    1667 }
    1668 
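vmmR3Save and vmmR3Load bracket the unit with a ~0 terminator so truncated or shifted data is caught on load rather than silently misread. The same pattern sketched with stdio instead of the SSM API (function names are the sketch's own):

#include <stdint.h>
#include <stdio.h>

static int sketchSaveUnit(FILE *pFile, const void *pvPayload, size_t cbPayload)
{
    const uint32_t u32Term = ~0U;
    if (fwrite(pvPayload, 1, cbPayload, pFile) != cbPayload)
        return -1;
    return fwrite(&u32Term, sizeof(u32Term), 1, pFile) == 1 ? 0 : -1;
}

static int sketchLoadUnit(FILE *pFile, void *pvPayload, size_t cbPayload)
{
    uint32_t u32Term = 0;
    if (fread(pvPayload, 1, cbPayload, pFile) != cbPayload)
        return -1;
    if (fread(&u32Term, sizeof(u32Term), 1, pFile) != 1)
        return -1;
    return u32Term == ~0U ? 0 : -1;         /* terminator mismatch: format drift */
}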
    1669 
    1670 /**
    1671903 * Selects the switcher to be used for switching to GC.
    1672904 *
     
    1712944        return VINF_SUCCESS;
    1713945    }
     946
    1714947    return VERR_NOT_IMPLEMENTED;
    1715948}
     949
    1716950
    1717951/**
     
    1734968}
    1735969
    1736 
    1737 /**
    1738  * Resolve a builtin RC symbol.
    1739  *
    1740  * Called by PDM when loading or relocating RC modules.
    1741  *
    1742  * @returns VBox status
    1743  * @param   pVM             VM Handle.
    1744  * @param   pszSymbol       Symbol to resolve.
    1745  * @param   pRCPtrValue     Where to store the symbol value.
    1746  *
    1747  * @remark  This has to work before VMMR3Relocate() is called.
    1748  */
    1749 VMMR3DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue)
    1750 {
    1751     if (!strcmp(pszSymbol, "g_Logger"))
    1752     {
    1753         if (pVM->vmm.s.pRCLoggerR3)
    1754             pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
    1755         *pRCPtrValue = pVM->vmm.s.pRCLoggerRC;
    1756     }
    1757     else if (!strcmp(pszSymbol, "g_RelLogger"))
    1758     {
    1759 #ifdef VBOX_WITH_RC_RELEASE_LOGGING
    1760         if (pVM->vmm.s.pRCRelLoggerR3)
    1761             pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
    1762         *pRCPtrValue = pVM->vmm.s.pRCRelLoggerRC;
    1763 #else
    1764         *pRCPtrValue = NIL_RTRCPTR;
    1765 #endif
    1766     }
    1767     else
    1768         return VERR_SYMBOL_NOT_FOUND;
    1769     return VINF_SUCCESS;
    1770 }
    1771 
    1772 
    1773 /**
    1774  * Suspends the CPU yielder.
    1775  *
    1776  * @param   pVM             The VM handle.
    1777  */
    1778 VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
    1779 {
    1780     if (!pVM->vmm.s.cYieldResumeMillies)
    1781     {
    1782         uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
    1783         uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
    1784         if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
    1785             pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
    1786         else
    1787             pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
    1788         TMTimerStop(pVM->vmm.s.pYieldTimer);
    1789     }
    1790     pVM->vmm.s.u64LastYield = RTTimeNanoTS();
    1791 }
    1792 
    1793 
    1794 /**
    1795  * Stops the CPU yielder.
    1796  *
    1797  * @param   pVM             The VM handle.
    1798  */
    1799 VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
    1800 {
    1801     if (!pVM->vmm.s.cYieldResumeMillies)
    1802         TMTimerStop(pVM->vmm.s.pYieldTimer);
    1803     pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
    1804     pVM->vmm.s.u64LastYield = RTTimeNanoTS();
    1805 }
    1806 
    1807 
    1808 /**
    1809  * Resumes the CPU yielder when it has been suspended or stopped.
    1810  *
    1811  * @param   pVM             The VM handle.
    1812  */
    1813 VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
    1814 {
    1815     if (pVM->vmm.s.cYieldResumeMillies)
    1816     {
    1817         TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
    1818         pVM->vmm.s.cYieldResumeMillies = 0;
    1819     }
    1820 }
    1821 
    1822 
    1823 /**
    1824  * Internal timer callback function.
    1825  *
    1826  * @param   pVM             The VM.
    1827  * @param   pTimer          The timer handle.
    1828  * @param   pvUser          User argument specified upon timer creation.
    1829  */
    1830 static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
    1831 {
    1832     /*
    1833      * This really needs some careful tuning. While we shouldn't be too greedy since
    1834      * that'll cause the rest of the system to stall, we shouldn't be too nice either
    1835      * because that'll cause us to stall.
    1836      *
    1837      * The current logic is to use the default interval when there is no lag worth
    1838      * mentioning, but when we start accumulating lag we don't bother yielding at all.
    1839      *
    1840      * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
    1841      * so the lag is up to date.)
    1842      */
    1843     const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
    1844     if (    u64Lag     <   50000000 /* 50ms */
    1845         ||  (   u64Lag < 1000000000 /*  1s */
    1846              && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
    1847        )
    1848     {
    1849         uint64_t u64Elapsed = RTTimeNanoTS();
    1850         pVM->vmm.s.u64LastYield = u64Elapsed;
    1851 
    1852         RTThreadYield();
    1853 
    1854 #ifdef LOG_ENABLED
    1855         u64Elapsed = RTTimeNanoTS() - u64Elapsed;
    1856         Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
    1857 #endif
    1858     }
    1859     TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
    1860 }
    1861 
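The tuning comment in vmmR3YieldEMT boils down to a small predicate: yield on the default interval while the virtual sync lag is negligible, keep yielding briefly while it is moderate, and stop yielding once real lag accumulates. Isolated as a pure function for clarity (the thresholds are the ones used above; the function name is invented):

#include <stdbool.h>
#include <stdint.h>

static bool sketchShouldYield(uint64_t cNsLag, uint64_t cNsNow, uint64_t cNsLastYield)
{
    if (cNsLag < UINT64_C(50000000))                     /* lag under 50 ms */
        return true;
    if (   cNsLag < UINT64_C(1000000000)                 /* lag under 1 s and the */
        && cNsNow - cNsLastYield < UINT64_C(500000000))  /* last yield < 500 ms ago */
        return true;
    return false;                                        /* lagging: skip the yield */
}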
    1862 
    1863 /**
    1864  * Acquire global VM lock.
    1865  *
    1866  * @returns VBox status code
    1867  * @param   pVM         The VM to operate on.
    1868  *
    1869  * @remarks The global VMM lock isn't really used for anything any longer.
    1870  */
    1871 VMMR3DECL(int) VMMR3Lock(PVM pVM)
    1872 {
    1873     return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
    1874 }
    1875 
    1876 
    1877 /**
    1878  * Release global VM lock.
    1879  *
    1880  * @returns VBox status code
    1881  * @param   pVM         The VM to operate on.
    1882  *
    1883  * @remarks The global VMM lock isn't really used for anything any longer.
    1884  */
    1885 VMMR3DECL(int) VMMR3Unlock(PVM pVM)
    1886 {
    1887     return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
    1888 }
    1889 
    1890 
    1891 /**
    1892  * Return global VM lock owner.
    1893  *
    1894  * @returns Thread id of owner.
    1895  * @returns NIL_RTTHREAD if no owner.
    1896  * @param   pVM         The VM to operate on.
    1897  *
    1898  * @remarks The global VMM lock isn't really used for anything any longer.
    1899  */
    1900 VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
    1901 {
    1902     return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
    1903 }
    1904 
    1905 
    1906 /**
    1907  * Checks if the current thread is the owner of the global VM lock.
    1908  *
    1909  * @returns true if owner.
    1910  * @returns false if not owner.
    1911  * @param   pVM         The VM to operate on.
    1912  *
    1913  * @remarks The global VMM lock isn't really used for anything any longer.
    1914  */
    1915 VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
    1916 {
    1917     return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
    1918 }
    1919 
    1920 
    1921 /**
    1922  * Executes guest code in the raw-mode context.
    1923  *
    1924  * @param   pVM         VM handle.
    1925  */
    1926 VMMR3DECL(int) VMMR3RawRunGC(PVM pVM)
    1927 {
    1928     Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
    1929 
    1930     /*
    1931      * Set the EIP and ESP.
    1932      */
    1933     CPUMSetHyperEIP(pVM, CPUMGetGuestEFlags(pVM) & X86_EFL_VM
    1934                     ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
    1935                     : pVM->vmm.s.pfnCPUMRCResumeGuest);
    1936     CPUMSetHyperESP(pVM, pVM->vmm.s.pbEMTStackBottomRC);
    1937 
    1938     /*
    1939      * We hide log flushes (outer) and hypervisor interrupts (inner).
    1940      */
    1941     for (;;)
    1942     {
    1943         int rc;
    1944         do
    1945         {
    1946 #ifdef NO_SUPCALLR0VMM
    1947             rc = VERR_GENERAL_FAILURE;
    1948 #else
    1949             rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN);
    1950             if (RT_LIKELY(rc == VINF_SUCCESS))
    1951                 rc = pVM->vmm.s.iLastGZRc;
    1952 #endif
    1953         } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
    1954 
    1955         /*
    1956          * Flush the logs.
    1957          */
    1958 #ifdef LOG_ENABLED
    1959         PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
    1960         if (    pLogger
    1961             &&  pLogger->offScratch > 0)
    1962             RTLogFlushRC(NULL, pLogger);
    1963 #endif
    1964 #ifdef VBOX_WITH_RC_RELEASE_LOGGING
    1965         PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
    1966         if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
    1967             RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
    1968 #endif
    1969         if (rc != VINF_VMM_CALL_HOST)
    1970         {
    1971             Log2(("VMMR3RawRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
    1972             return rc;
    1973         }
    1974         rc = vmmR3ServiceCallHostRequest(pVM);
    1975         if (VBOX_FAILURE(rc))
    1976             return rc;
    1977         /* Resume GC */
    1978     }
    1979 }
    1980 
    1981 
    1982 /**
    1983  * Executes guest code (Intel VT-x and AMD-V).
    1984  *
    1985  * @param   pVM         VM handle.
    1986  */
    1987 VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM)
    1988 {
    1989     Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
    1990 
    1991     for (;;)
    1992     {
    1993         int rc;
    1994         do
    1995         {
    1996 #ifdef NO_SUPCALLR0VMM
    1997             rc = VERR_GENERAL_FAILURE;
    1998 #else
    1999             rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN);
    2000             if (RT_LIKELY(rc == VINF_SUCCESS))
    2001                 rc = pVM->vmm.s.iLastGZRc;
    2002 #endif
    2003         } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
    2004 
    2005 #ifdef LOG_ENABLED
    2006         /*
    2007          * Flush the log
    2008          */
    2009         PVMMR0LOGGER pR0LoggerR3 = pVM->vmm.s.pR0LoggerR3;
    2010         if (    pR0LoggerR3
    2011             &&  pR0LoggerR3->Logger.offScratch > 0)
    2012             RTLogFlushToLogger(&pR0LoggerR3->Logger, NULL);
    2013 #endif /* LOG_ENABLED */
    2014         if (rc != VINF_VMM_CALL_HOST)
    2015         {
    2016             Log2(("VMMR3HwAccRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
    2017             return rc;
    2018         }
    2019         rc = vmmR3ServiceCallHostRequest(pVM);
    2020         if (VBOX_FAILURE(rc))
    2021             return rc;
    2022         /* Resume R0 */
    2023     }
    2024 }
    2025 
    2026 /**
    2027  * Calls a GC function.
    2028  *
    2029  * @param   pVM         The VM handle.
    2030  * @param   GCPtrEntry  The GC function address.
    2031  * @param   cArgs       The number of arguments in the ellipsis.
    2032  * @param   ...         Arguments to the function.
    2033  */
    2034 VMMR3DECL(int) VMMR3CallGC(PVM pVM, RTRCPTR GCPtrEntry, unsigned cArgs, ...)
    2035 {
    2036     va_list args;
    2037     va_start(args, cArgs);
    2038     int rc = VMMR3CallGCV(pVM, GCPtrEntry, cArgs, args);
    2039     va_end(args);
    2040     return rc;
    2041 }
    2042 
    2043 
    2044 /**
    2045  * Calls a GC function.
    2046  *
    2047  * @param   pVM         The VM handle.
    2048  * @param   GCPtrEntry  The GC function address.
    2049  * @param   cArgs       The number of arguments in the ellipsis.
    2050  * @param   args        Arguments to the function.
    2051  */
    2052 VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTRCPTR GCPtrEntry, unsigned cArgs, va_list args)
    2053 {
    2054     Log2(("VMMR3CallGCV: GCPtrEntry=%VRv cArgs=%d\n", GCPtrEntry, cArgs));
    2055 
    2056     /*
    2057      * Setup the call frame using the trampoline.
    2058      */
    2059     CPUMHyperSetCtxCore(pVM, NULL);
    2060     memset(pVM->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
    2061     CPUMSetHyperESP(pVM, pVM->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32));
    2062     PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
    2063     int i = cArgs;
    2064     while (i-- > 0)
    2065         *pFrame++ = va_arg(args, RTGCUINTPTR32);
    2066 
    2067     CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR32));                          /* stack frame size */
    2068     CPUMPushHyper(pVM, GCPtrEntry);                                             /* what to call */
    2069     CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnCallTrampolineRC);
    2070 
    2071     /*
    2072      * We hide log flushes (outer) and hypervisor interrupts (inner).
    2073      */
    2074     for (;;)
    2075     {
    2076         int rc;
    2077         do
    2078         {
    2079 #ifdef NO_SUPCALLR0VMM
    2080             rc = VERR_GENERAL_FAILURE;
    2081 #else
    2082             rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN);
    2083             if (RT_LIKELY(rc == VINF_SUCCESS))
    2084                 rc = pVM->vmm.s.iLastGZRc;
    2085 #endif
    2086         } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
    2087 
    2088         /*
    2089          * Flush the logs.
    2090          */
    2091 #ifdef LOG_ENABLED
    2092         PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
    2093         if (    pLogger
    2094             &&  pLogger->offScratch > 0)
    2095             RTLogFlushRC(NULL, pLogger);
    2096 #endif
    2097 #ifdef VBOX_WITH_RC_RELEASE_LOGGING
    2098         PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
    2099         if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
    2100             RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
    2101 #endif
    2102         if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
    2103             VMMR3FatalDump(pVM, rc);
    2104         if (rc != VINF_VMM_CALL_HOST)
    2105         {
    2106             Log2(("VMMR3CallGCV: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
    2107             return rc;
    2108         }
    2109         rc = vmmR3ServiceCallHostRequest(pVM);
    2110         if (VBOX_FAILURE(rc))
    2111             return rc;
    2112     }
    2113 }
    2114 
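The frame-building step in VMMR3CallGCV copies the variadic arguments into the top of the downward-growing hypervisor stack so the RC callee sees them in declaration order. A standalone sketch of just that step (pbStackTop points one past the end of the stack buffer; all names are the sketch's own):

#include <stdarg.h>
#include <stdint.h>

static uint32_t *sketchPackArgs(uint8_t *pbStackTop, unsigned cArgs, ...)
{
    uint32_t *pFrame = (uint32_t *)pbStackTop - cArgs;  /* reserve cArgs slots */
    uint32_t *pDst   = pFrame;
    va_list   va;
    va_start(va, cArgs);
    while (cArgs-- > 0)
        *pDst++ = va_arg(va, uint32_t);                 /* first argument lands lowest */
    va_end(va);
    return pFrame;                                      /* the new stack pointer */
}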
    2115 
    2116 /**
    2117  * Resumes executing hypervisor code when interrupted by a queue flush or a
    2118  * debug event.
    2119  *
    2120  * @returns VBox status code.
    2121  * @param   pVM         VM handle.
    2122  */
    2123 VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM)
    2124 {
    2125     Log(("VMMR3ResumeHyper: eip=%VGv esp=%VGv\n", CPUMGetHyperEIP(pVM), CPUMGetHyperESP(pVM)));
    2126 
    2127     /*
    2128      * We hide log flushes (outer) and hypervisor interrupts (inner).
    2129      */
    2130     for (;;)
    2131     {
    2132         int rc;
    2133         do
    2134         {
    2135 #ifdef NO_SUPCALLR0VMM
    2136             rc = VERR_GENERAL_FAILURE;
    2137 #else
    2138             rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN);
    2139             if (RT_LIKELY(rc == VINF_SUCCESS))
    2140                 rc = pVM->vmm.s.iLastGZRc;
    2141 #endif
    2142         } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
    2143 
    2144         /*
    2145          * Flush the loggers,
    2146          */
    2147 #ifdef LOG_ENABLED
    2148         PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
    2149         if (    pLogger
    2150             &&  pLogger->offScratch > 0)
    2151             RTLogFlushRC(NULL, pLogger);
    2152 #endif
    2153 #ifdef VBOX_WITH_RC_RELEASE_LOGGING
    2154         PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
    2155         if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
    2156             RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
    2157 #endif
    2158         if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
    2159             VMMR3FatalDump(pVM, rc);
    2160         if (rc != VINF_VMM_CALL_HOST)
    2161         {
    2162             Log(("VMMR3ResumeHyper: returns %Vrc\n", rc));
    2163             return rc;
    2164         }
    2165         rc = vmmR3ServiceCallHostRequest(pVM);
    2166         if (VBOX_FAILURE(rc))
    2167             return rc;
    2168     }
    2169 }
    2170 
    2171 
    2172 /**
    2173  * Service a call to the ring-3 host code.
    2174  *
    2175  * @returns VBox status code.
    2176  * @param   pVM     VM handle.
    2177  * @remark  Careful with critsects.
    2178  */
    2179 static int vmmR3ServiceCallHostRequest(PVM pVM)
    2180 {
    2181     switch (pVM->vmm.s.enmCallHostOperation)
    2182     {
    2183         /*
    2184          * Acquire the PDM lock.
    2185          */
    2186         case VMMCALLHOST_PDM_LOCK:
    2187         {
    2188             pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
    2189             break;
    2190         }
    2191 
    2192         /*
    2193          * Flush a PDM queue.
    2194          */
    2195         case VMMCALLHOST_PDM_QUEUE_FLUSH:
    2196         {
    2197             PDMR3QueueFlushWorker(pVM, NULL);
    2198             pVM->vmm.s.rcCallHost = VINF_SUCCESS;
    2199             break;
    2200         }
    2201 
    2202         /*
    2203          * Grow the PGM pool.
    2204          */
    2205         case VMMCALLHOST_PGM_POOL_GROW:
    2206         {
    2207             pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
    2208             break;
    2209         }
    2210 
    2211         /*
    2212          * Maps a page allocation chunk into ring-3 so ring-0 can use it.
    2213          */
    2214         case VMMCALLHOST_PGM_MAP_CHUNK:
    2215         {
    2216             pVM->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVM->vmm.s.u64CallHostArg);
    2217             break;
    2218         }
    2219 
    2220         /*
    2221          * Allocates more handy pages.
    2222          */
    2223         case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
    2224         {
    2225             pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
    2226             break;
    2227         }
    2228 #ifndef VBOX_WITH_NEW_PHYS_CODE
    2229 
    2230         case VMMCALLHOST_PGM_RAM_GROW_RANGE:
    2231         {
    2232             const RTGCPHYS GCPhys = pVM->vmm.s.u64CallHostArg;
    2233             pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, &GCPhys);
    2234             break;
    2235         }
    2236 #endif
    2237 
    2238         /*
    2239          * Acquire the PGM lock.
    2240          */
    2241         case VMMCALLHOST_PGM_LOCK:
    2242         {
    2243             pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
    2244             break;
    2245         }
    2246 
    2247         /*
    2248          * Flush REM handler notifications.
    2249          */
    2250         case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
    2251         {
    2252             REMR3ReplayHandlerNotifications(pVM);
    2253             break;
    2254         }
    2255 
    2256         /*
    2257          * This is a no-op. We just take this route to avoid unnecessary
    2258          * tests in the loops.
    2259          */
    2260         case VMMCALLHOST_VMM_LOGGER_FLUSH:
    2261             break;
    2262 
    2263         /*
    2264          * Set the VM error message.
    2265          */
    2266         case VMMCALLHOST_VM_SET_ERROR:
    2267             VMR3SetErrorWorker(pVM);
    2268             break;
    2269 
    2270         /*
    2271          * Set the VM runtime error message.
    2272          */
    2273         case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
    2274             VMR3SetRuntimeErrorWorker(pVM);
    2275             break;
    2276 
    2277         /*
    2278          * Signal a ring-0 hypervisor assertion.
    2279          * Cancel the longjmp operation that's in progress.
    2280          */
    2281         case VMMCALLHOST_VM_R0_ASSERTION:
    2282             pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
    2283             pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call = false;
    2284 #ifdef RT_ARCH_X86
    2285             pVM->vmm.s.CallHostR0JmpBuf.eip = 0;
    2286 #else
    2287             pVM->vmm.s.CallHostR0JmpBuf.rip = 0;
    2288 #endif
    2289             LogRel((pVM->vmm.s.szRing0AssertMsg1));
    2290             LogRel((pVM->vmm.s.szRing0AssertMsg2));
    2291             return VERR_VMM_RING0_ASSERTION;
    2292 
    2293         default:
    2294             AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
    2295             return VERR_INTERNAL_ERROR;
    2296     }
    2297 
    2298     pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
    2299     return VINF_SUCCESS;
    2300 }
    2301 
    2302 
    2303 /**
    2304  * Displays the forced action flags.
    2305  *
    2306  * @param   pVM         The VM handle.
    2307  * @param   pHlp        The output helpers.
    2308  * @param   pszArgs     The additional arguments (ignored).
    2309  */
    2310 static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
    2311 {
    2312     const uint32_t fForcedActions = pVM->fForcedActions;
    2313 
    2314     pHlp->pfnPrintf(pHlp, "Forced action Flags: %#RX32", fForcedActions);
    2315 
    2316     /* show the flag mnemonics  */
    2317     int c = 0;
    2318     uint32_t f = fForcedActions;
    2319 #define PRINT_FLAG(flag) do { \
    2320         if (f & (flag)) \
    2321         { \
    2322             static const char *s_psz = #flag; \
    2323             if (!(c % 6)) \
    2324                 pHlp->pfnPrintf(pHlp, "%s\n    %s", c ? "," : "", s_psz + 6); \
    2325             else \
    2326                 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
    2327             c++; \
    2328             f &= ~(flag); \
    2329         } \
    2330     } while (0)
    2331     PRINT_FLAG(VM_FF_INTERRUPT_APIC);
    2332     PRINT_FLAG(VM_FF_INTERRUPT_PIC);
    2333     PRINT_FLAG(VM_FF_TIMER);
    2334     PRINT_FLAG(VM_FF_PDM_QUEUES);
    2335     PRINT_FLAG(VM_FF_PDM_DMA);
    2336     PRINT_FLAG(VM_FF_PDM_CRITSECT);
    2337     PRINT_FLAG(VM_FF_DBGF);
    2338     PRINT_FLAG(VM_FF_REQUEST);
    2339     PRINT_FLAG(VM_FF_TERMINATE);
    2340     PRINT_FLAG(VM_FF_RESET);
    2341     PRINT_FLAG(VM_FF_PGM_SYNC_CR3);
    2342     PRINT_FLAG(VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    2343     PRINT_FLAG(VM_FF_TRPM_SYNC_IDT);
    2344     PRINT_FLAG(VM_FF_SELM_SYNC_TSS);
    2345     PRINT_FLAG(VM_FF_SELM_SYNC_GDT);
    2346     PRINT_FLAG(VM_FF_SELM_SYNC_LDT);
    2347     PRINT_FLAG(VM_FF_INHIBIT_INTERRUPTS);
    2348     PRINT_FLAG(VM_FF_CSAM_SCAN_PAGE);
    2349     PRINT_FLAG(VM_FF_CSAM_PENDING_ACTION);
    2350     PRINT_FLAG(VM_FF_TO_R3);
    2351     PRINT_FLAG(VM_FF_DEBUG_SUSPEND);
    2352     if (f)
    2353         pHlp->pfnPrintf(pHlp, "%s\n    Unknown bits: %#RX32\n", c ? "," : "", f);
    2354     else
    2355         pHlp->pfnPrintf(pHlp, "\n");
    2356 #undef PRINT_FLAG
    2357 
    2358     /* the groups */
    2359     c = 0;
    2360 #define PRINT_GROUP(grp) do { \
    2361         if (fForcedActions & (grp)) \
    2362         { \
    2363             static const char *s_psz = #grp; \
    2364             if (!(c % 5)) \
    2365                 pHlp->pfnPrintf(pHlp, "%s    %s", c ? ",\n" : "Groups:\n", s_psz + 6); \
    2366             else \
    2367                 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
    2368             c++; \
    2369         } \
    2370     } while (0)
    2371     PRINT_GROUP(VM_FF_EXTERNAL_SUSPENDED_MASK);
    2372     PRINT_GROUP(VM_FF_EXTERNAL_HALTED_MASK);
    2373     PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_MASK);
    2374     PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK);
    2375     PRINT_GROUP(VM_FF_HIGH_PRIORITY_POST_MASK);
    2376     PRINT_GROUP(VM_FF_NORMAL_PRIORITY_POST_MASK);
    2377     PRINT_GROUP(VM_FF_NORMAL_PRIORITY_MASK);
    2378     PRINT_GROUP(VM_FF_RESUME_GUEST_MASK);
    2379     PRINT_GROUP(VM_FF_ALL_BUT_RAW_MASK);
    2380     if (c)
    2381         pHlp->pfnPrintf(pHlp, "\n");
    2382 #undef PRINT_GROUP
    2383 }
    2384 
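PRINT_FLAG and PRINT_GROUP rely on the preprocessor's # operator to stringify the flag name and then skip the common "VM_FF_" prefix with s_psz + 6. A tiny self-contained demonstration of the same trick (the flag name and value are invented; the prefix skipped here is the 7-character "SKETCH_"):

#include <stdio.h>

#define SKETCH_FF_TIMER  0x0001u
#define SKETCH_PRINT(flag) do { \
        static const char *s_psz = #flag; \
        printf("%s\n", s_psz + 7);  /* drop the "SKETCH_" prefix */ \
    } while (0)

int main(void)
{
    SKETCH_PRINT(SKETCH_FF_TIMER);  /* prints "FF_TIMER" */
    return 0;
}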