VirtualBox

Changeset 92392 in vbox for trunk/src/VBox/VMM/VMMR3


Timestamp: Nov 12, 2021 10:39:56 AM (3 years ago)
Author: vboxsync
Message: VMM: Removed the callring-3 API and some of the associated stuff. bugref:10093
File: 1 edited

Legend: unmodified context lines carry no prefix; added lines are prefixed with '+', removed lines with '-'.
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp
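All of the hunks below apply the same transformation: the old code wrapped every entry into ring-0 in a for (;;) retry loop, because ring-0 could longjmp back with VINF_VMM_CALL_HOST and expect ring-3 to service the request (vmmR3ServiceCallRing3Request) before resuming. With the call-ring-3 API removed, the only remaining case is a ring-0 assertion, so each call site now makes a single call and only checks for VERR_VMM_RING0_ASSERTION. The following is a minimal standalone sketch of that control-flow change; the status codes and the helpers call_ring0, service_ring3_request and handle_ring0_assert are illustrative stand-ins, not the VirtualBox API.

    #include <stdio.h>

    /* Illustrative status codes standing in for VBox's VINF_/VERR_ values. */
    enum { MY_SUCCESS = 0, MY_CALL_HOST = 1, MY_RING0_ASSERTION = -1 };

    static int call_ring0(void)            { return MY_RING0_ASSERTION; } /* pretend ring-0 asserted */
    static int service_ring3_request(void) { return MY_SUCCESS; }         /* old-style request servicing */
    static int handle_ring0_assert(void)   { printf("ring-0 assertion logged\n"); return MY_RING0_ASSERTION; }

    /* Old shape: loop, service the ring-3 request, then resume ring-0. */
    static int old_style(void)
    {
        for (;;)
        {
            int rc = call_ring0();
            if (rc != MY_CALL_HOST)
                return rc;                 /* done: success, error or assertion */
            rc = service_ring3_request();
            if (rc < 0)
                return rc;
            /* otherwise: resume ring-0 */
        }
    }

    /* New shape: a single call; the only thing left to handle is an assertion. */
    static int new_style(void)
    {
        int rc = call_ring0();
        if (rc == MY_RING0_ASSERTION)
            rc = handle_ring0_assert();
        return rc;
    }

    int main(void)
    {
        printf("old_style: %d, new_style: %d\n", old_style(), new_style());
        return 0;
    }

With VMMCALLRING3_VM_R0_ASSERTION as the only operation left, the retry loop, the operation enum and the dispatch switch no longer earn their keep, which is exactly what the hunks below delete.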

--- trunk/src/VBox/VMM/VMMR3/VMM.cpp (r92391)
+++ trunk/src/VBox/VMM/VMMR3/VMM.cpp (r92392)
@@ -174,5 +174,5 @@
 static VBOXSTRICTRC         vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
                                                      uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
-static int                  vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu);
+static int                  vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu);
 static FNRTTHREAD           vmmR3LogFlusher;
 static void                 vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf,
     
@@ -429,5 +429,4 @@
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest,      STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest",      STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR,            STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR",            STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
-    STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3,           STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc",             STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");

     STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherFlushes,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-Flushes",  STAMUNIT_OCCURENCES, "Total number of buffer flushes");
     
@@ -525,27 +524,20 @@
      * Call Ring-0 entry with init code.
      */
-    for (;;)
-    {
 #ifdef NO_SUPCALLR0VMM
-        //rc = VERR_GENERAL_FAILURE;
-        rc = VINF_SUCCESS;
+    //rc = VERR_GENERAL_FAILURE;
+    rc = VINF_SUCCESS;
 #else
-        rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
-#endif
-        /*
-         * Flush the logs.
-         */
+    rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
+#endif
+
+    /*
+     * Flush the logs & deal with assertions.
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rc != VINF_VMM_CALL_HOST)
-            break;
-        rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
-            break;
-        /* Resume R0 */
-    }
-
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rc == VERR_VMM_RING0_ASSERTION)
+        rc = vmmR3HandleRing0Assert(pVM, pVCpu);
     if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
     {
@@ -555,4 +547,7 @@
     }

+    /*
+     * Log stuff we learned in ring-0.
+     */
     /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
     if (pVM->vmm.s.fIsUsingContextHooks)
     
@@ -657,27 +652,20 @@
      * Call Ring-0 entry with termination code.
      */
-    int rc;
-    for (;;)
-    {
 #ifdef NO_SUPCALLR0VMM
-        //rc = VERR_GENERAL_FAILURE;
-        rc = VINF_SUCCESS;
+    //rc = VERR_GENERAL_FAILURE;
+    int rc = VINF_SUCCESS;
 #else
-        rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
-#endif
-        /*
-         * Flush the logs.
-         */
+    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
+#endif
+
+    /*
+     * Flush the logs & deal with assertions.
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rc != VINF_VMM_CALL_HOST)
-            break;
-        rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
-            break;
-        /* Resume R0 */
-    }
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rc == VERR_VMM_RING0_ASSERTION)
+        rc = vmmR3HandleRing0Assert(pVM, pVCpu);
     if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
     {
@@ -687,4 +675,7 @@
     }

+    /*
+     * Do clean ups.
+     */
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
     
@@ -1238,39 +1229,33 @@
     Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));

-    for (;;)
-    {
-        int rc;
-        do
-        {
+    int rc;
+    do
+    {
 #ifdef NO_SUPCALLR0VMM
-            rc = VERR_GENERAL_FAILURE;
+        rc = VERR_GENERAL_FAILURE;
 #else
-            rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
-            if (RT_LIKELY(rc == VINF_SUCCESS))
-                rc = pVCpu->vmm.s.iLastGZRc;
-#endif
-        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
+        rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
+        if (RT_LIKELY(rc == VINF_SUCCESS))
+            rc = pVCpu->vmm.s.iLastGZRc;
+#endif
+    } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);

 #if 0 /** @todo triggers too often */
-        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
-#endif
-
-        /*
-         * Flush the logs
-         */
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
+#endif
+
+    /*
+     * Flush the logs
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rc != VINF_VMM_CALL_HOST)
-        {
-            Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
-            return rc;
-        }
-        rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc))
-            return rc;
-        /* Resume R0 */
-    }
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rc != VERR_VMM_RING0_ASSERTION)
+    {
+        Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
+        return rc;
+    }
+    return vmmR3HandleRing0Assert(pVM, pVCpu);
 }

     
@@ -1286,32 +1271,26 @@
 VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation)
 {
-    for (;;)
-    {
-        VBOXSTRICTRC rcStrict;
-        do
-        {
+    VBOXSTRICTRC rcStrict;
+    do
+    {
 #ifdef NO_SUPCALLR0VMM
-            rcStrict = VERR_GENERAL_FAILURE;
+        rcStrict = VERR_GENERAL_FAILURE;
 #else
-            rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
-            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-                rcStrict = pVCpu->vmm.s.iLastGZRc;
-#endif
-        } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
-
-        /*
-         * Flush the logs
-         */
+        rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+            rcStrict = pVCpu->vmm.s.iLastGZRc;
+#endif
+    } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
+
+    /*
+     * Flush the logs
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rcStrict != VINF_VMM_CALL_HOST)
-            return rcStrict;
-        int rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc))
-            return rc;
-        /* Resume R0 */
-    }
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rcStrict != VERR_VMM_RING0_ASSERTION)
+        return rcStrict;
+    return vmmR3HandleRing0Assert(pVM, pVCpu);
 }

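Both fast-call paths above (VMMR3HmRunGC and VMMR3CallR0EmtFast) keep their inner do/while: the entry into ring-0 is simply repeated while it reports VINF_EM_RAW_INTERRUPT_HYPER, i.e. while it was interrupted for a reason the caller does not need to act on. A minimal sketch of that retry idiom, using hypothetical names rather than the VBox API:

    #include <stdio.h>

    /* Illustrative stand-ins; not VBox status codes or entry points. */
    enum { MY_SUCCESS = 0, MY_RAW_INTERRUPT_HYPER = 2 };

    static int fake_fast_call(void)
    {
        static int s_cInterrupts = 3;      /* pretend the first few calls get interrupted */
        return s_cInterrupts-- > 0 ? MY_RAW_INTERRUPT_HYPER : MY_SUCCESS;
    }

    int main(void)
    {
        int rc;
        do
            rc = fake_fast_call();         /* re-enter "ring-0" until not merely interrupted */
        while (rc == MY_RAW_INTERRUPT_HYPER);
        printf("final rc=%d\n", rc);
        return 0;
    }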
     
@@ -2449,76 +2428,56 @@
 VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
 {
-    int rc;
-    for (;;)
-    {
+    /*
+     * Call ring-0.
+     */
 #ifdef NO_SUPCALLR0VMM
-        rc = VERR_GENERAL_FAILURE;
+    int rc = VERR_GENERAL_FAILURE;
 #else
-        rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
-#endif
-        /*
-         * Flush the logs.
-         */
+    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
+#endif
+
+    /*
+     * Flush the logs and deal with ring-0 assertions.
+     */
 #ifdef LOG_ENABLED
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-        VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-        if (rc != VINF_VMM_CALL_HOST)
-            break;
-        rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
-        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
-            break;
-        /* Resume R0 */
-    }
-
-    AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
-                          ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
-                          VERR_IPE_UNEXPECTED_INFO_STATUS);
-    return rc;
-}
-
-
-/**
- * Service a call to the ring-3 host code.
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
+#endif
+    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
+    if (rc != VERR_VMM_RING0_ASSERTION)
+    {
+        AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
+                              ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
+                              VERR_IPE_UNEXPECTED_INFO_STATUS);
+        return rc;
+    }
+    return vmmR3HandleRing0Assert(pVM, pVCpu);
+}
+
+
+/**
+ * Logs a ring-0 assertion ASAP after returning to ring-3.
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
- * @param   pVCpu   The cross context virtual CPU structure.
- * @remarks Careful with critsects.
- */
-static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
-{
-    /*
-     * We must also check for pending critsect exits or else we can deadlock
-     * when entering other critsects here.
-     */
-    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
-        PDMCritSectBothFF(pVM, pVCpu);
-
-    switch (pVCpu->vmm.s.enmCallRing3Operation)
-    {
-        /*
-         * Signal a ring 0 hypervisor assertion.
-         * Cancel the longjmp operation that's in progress.
-         */
-        case VMMCALLRING3_VM_R0_ASSERTION:
-            pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
-            pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu)
+{
+    /*
+     * Signal a ring 0 hypervisor assertion.
+     * Cancel the longjmp operation that's in progress.
+     */
+    pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
 #ifdef RT_ARCH_X86
-            pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
+    pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
 #else
-            pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
+    pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
 #endif
 #ifdef VMM_R0_SWITCH_STACK
-            *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker  */
-#endif
-            LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
-            LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
-            return VERR_VMM_RING0_ASSERTION;
-
-        default:
-            AssertMsgFailed(("enmCallRing3Operation=%d\n", pVCpu->vmm.s.enmCallRing3Operation));
-            return VERR_VMM_UNKNOWN_RING3_CALL;
-    }
+    *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker  */
+#endif
+    LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
+    LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
+    return VERR_VMM_RING0_ASSERTION;
 }

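The new vmmR3HandleRing0Assert above no longer dispatches on an operation enum; it only cancels the longjmp state left behind by ring-0 (clearing fInRing3Call and the saved eip/rip) and writes the two saved assertion messages to the release log. For readers unfamiliar with the mechanism, here is a simplified, self-contained model of longjmp-based assertion reporting; the jump buffer, message buffer and function names are illustrative and are not the VMM's actual CallRing3JmpBufR0 code.

    #include <setjmp.h>
    #include <stdio.h>
    #include <string.h>

    static jmp_buf g_JmpBuf;               /* armed by the caller before entering "ring-0" */
    static char    g_szAssertMsg[256];     /* assertion text stashed before the longjmp    */
    static int     g_fJumpArmed = 0;

    static void fake_ring0_body(void)
    {
        /* Something goes wrong deep inside "ring-0": stash the message and bail out. */
        strcpy(g_szAssertMsg, "Assertion failed in fake ring-0 code\n");
        if (g_fJumpArmed)
            longjmp(g_JmpBuf, 1);
    }

    static int fake_call_ring0(void)
    {
        g_fJumpArmed = 1;
        if (setjmp(g_JmpBuf) != 0)
        {
            /* Landed here via longjmp: disarm the buffer and log exactly once. */
            g_fJumpArmed = 0;
            fprintf(stderr, "%s", g_szAssertMsg);
            return -1;                     /* stands in for VERR_VMM_RING0_ASSERTION */
        }
        fake_ring0_body();
        g_fJumpArmed = 0;
        return 0;                          /* stands in for VINF_SUCCESS */
    }

    int main(void)
    {
        printf("fake_call_ring0 -> %d\n", fake_call_ring0());
        return 0;
    }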