VirtualBox

Changeset 92392 in vbox


Ignore:
Timestamp:
Nov 12, 2021 10:39:56 AM (3 years ago)
Author:
vboxsync
Message:

VMM: Removed the callring-3 API and some of the associated stuff. bugref:10093

Location:
trunk
Files:
15 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/err.h

    r92248 r92392  
    12581258 * @{
    12591259 */
    1260 /** Reason for leaving RZ: Calling host function. */
    1261 #define VINF_VMM_CALL_HOST                  2700
    12621260/** Reason for leaving R0: Hit a ring-0 assertion on EMT. */
    12631261#define VERR_VMM_RING0_ASSERTION            (-2701)
  • trunk/include/VBox/err.mac

    r87439 r92392  
    488488%define VERR_IOM_MMIO_REGION_ALREADY_MAPPED    (-2662)
    489489%define VERR_IOM_MMIO_REGION_NOT_MAPPED    (-2663)
    490 %define VINF_VMM_CALL_HOST    2700
    491490%define VERR_VMM_RING0_ASSERTION    (-2701)
    492491%define VERR_VMM_HYPER_CR3_MISMATCH    (-2702)
  • trunk/include/VBox/vmm/vmm.h

    r92391 r92392  
    4949
    5050/**
    51  * VMMRZCallRing3 operations.
    52  */
    53 typedef enum VMMCALLRING3
    54 {
    55     /** Invalid operation.  */
    56     VMMCALLRING3_INVALID = 0,
    57     /** Signal a ring 0 assertion. */
    58     VMMCALLRING3_VM_R0_ASSERTION,
    59     /** The usual 32-bit hack. */
    60     VMMCALLRING3_32BIT_HACK = 0x7fffffff
    61 } VMMCALLRING3;
    62 
    63 /**
    64  * VMMRZCallRing3 notification callback.
     51 * Ring-0 assertion notification callback.
    6552 *
    6653 * @returns VBox status code.
    6754 * @param   pVCpu           The cross context virtual CPU structure.
    68  * @param   enmOperation    The operation causing the ring-3 jump.
    6955 * @param   pvUser          The user argument.
    7056 */
    71 typedef DECLCALLBACKTYPE(int, FNVMMR0CALLRING3NOTIFICATION,(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, void *pvUser));
    72 /** Pointer to a FNRTMPNOTIFICATION(). */
    73 typedef FNVMMR0CALLRING3NOTIFICATION *PFNVMMR0CALLRING3NOTIFICATION;
     57typedef DECLCALLBACKTYPE(int, FNVMMR0ASSERTIONNOTIFICATION,(PVMCPUCC pVCpu, void *pvUser));
     58/** Pointer to a FNVMMR0ASSERTIONNOTIFICATION(). */
     59typedef FNVMMR0ASSERTIONNOTIFICATION *PFNVMMR0ASSERTIONNOTIFICATION;
    7460
    7561/**
     
    513499VMMR0_INT_DECL(int)  VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent);
    514500VMMR0_INT_DECL(int)  VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent);
     501VMMR0_INT_DECL(int)  VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser);
     502VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu);
     503VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu);
    515504
    516505/** @name VMMR0EMTWAIT_F_XXX - flags for VMMR0EmtWaitEventInner and friends.
     
    592581 * @{
    593582 */
    594 VMMRZDECL(int)      VMMRZCallRing3(PVMCC pVMCC, PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg);
    595 VMMRZDECL(int)      VMMRZCallRing3NoCpu(PVMCC pVM, VMMCALLRING3 enmOperation, uint64_t uArg);
    596583VMMRZDECL(void)     VMMRZCallRing3Disable(PVMCPUCC pVCpu);
    597584VMMRZDECL(void)     VMMRZCallRing3Enable(PVMCPUCC pVCpu);
    598585VMMRZDECL(bool)     VMMRZCallRing3IsEnabled(PVMCPUCC pVCpu);
    599 VMMRZDECL(int)      VMMRZCallRing3SetNotification(PVMCPUCC pVCpu, R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallback, RTR0PTR pvUser);
    600 VMMRZDECL(void)     VMMRZCallRing3RemoveNotification(PVMCPUCC pVCpu);
    601 VMMRZDECL(bool)     VMMRZCallRing3IsNotificationSet(PVMCPUCC pVCpu);
    602586/** @} */
    603587#endif
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r91323 r92392  
    7878    DECLR0CALLBACKMEMBER(int,          pfnEnterSession, (PVMCPUCC pVCpu));
    7979    DECLR0CALLBACKMEMBER(void,         pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit));
    80     DECLR0CALLBACKMEMBER(int,          pfnCallRing3Callback, (PVMCPUCC pVCpu, VMMCALLRING3 enmOperation));
     80    DECLR0CALLBACKMEMBER(int,          pfnAssertionCallback, (PVMCPUCC pVCpu));
    8181    DECLR0CALLBACKMEMBER(int,          pfnExportHostState, (PVMCPUCC pVCpu));
    8282    DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPUCC pVCpu));
     
    162162    /* .pfnEnterSession = */        VMXR0Enter,
    163163    /* .pfnThreadCtxCallback = */   VMXR0ThreadCtxCallback,
    164     /* .pfnCallRing3Callback = */   VMXR0CallRing3Callback,
     164    /* .pfnAssertionCallback = */   VMXR0AssertionCallback,
    165165    /* .pfnExportHostState = */     VMXR0ExportHostState,
    166166    /* .pfnRunGuestCode = */        VMXR0RunGuestCode,
     
    177177    /* .pfnEnterSession = */        SVMR0Enter,
    178178    /* .pfnThreadCtxCallback = */   SVMR0ThreadCtxCallback,
    179     /* .pfnCallRing3Callback = */   SVMR0CallRing3Callback,
     179    /* .pfnAssertionCallback = */   SVMR0AssertionCallback,
    180180    /* .pfnExportHostState = */     SVMR0ExportHostState,
    181181    /* .pfnRunGuestCode = */        SVMR0RunGuestCode,
     
    233233}
    234234
    235 static DECLCALLBACK(int) hmR0DummyCallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation)
    236 {
    237     RT_NOREF(pVCpu, enmOperation);
     235static DECLCALLBACK(int) hmR0DummyAssertionCallback(PVMCPUCC pVCpu)
     236{
     237    RT_NOREF(pVCpu);
    238238    return VINF_SUCCESS;
    239239}
     
    256256    /* .pfnEnterSession = */        hmR0DummyEnter,
    257257    /* .pfnThreadCtxCallback = */   hmR0DummyThreadCtxCallback,
    258     /* .pfnCallRing3Callback = */   hmR0DummyCallRing3Callback,
     258    /* .pfnAssertionCallback = */   hmR0DummyAssertionCallback,
    259259    /* .pfnExportHostState = */     hmR0DummyExportHostState,
    260260    /* .pfnRunGuestCode = */        hmR0DummyRunGuestCode,
     
    13731373
    13741374/**
    1375  * Notification callback before performing a longjump to ring-3.
     1375 * Notification callback before an assertion longjump and guru meditation.
    13761376 *
    13771377 * @returns VBox status code.
    13781378 * @param   pVCpu           The cross context virtual CPU structure.
    1379  * @param   enmOperation    The operation causing the ring-3 longjump.
    13801379 * @param   pvUser          User argument, currently unused, NULL.
    13811380 */
    1382 static DECLCALLBACK(int) hmR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
     1381static DECLCALLBACK(int) hmR0AssertionCallback(PVMCPUCC pVCpu, void *pvUser)
    13831382{
    13841383    RT_NOREF(pvUser);
    13851384    Assert(pVCpu);
    1386     Assert(g_HmR0Ops.pfnCallRing3Callback);
    1387     return g_HmR0Ops.pfnCallRing3Callback(pVCpu, enmOperation);
     1385    Assert(g_HmR0Ops.pfnAssertionCallback);
     1386    return g_HmR0Ops.pfnAssertionCallback(pVCpu);
    13881387}
    13891388
     
    14121411
    14131412    /* Register a callback to fire prior to performing a longjmp to ring-3 so HM can disable VT-x/AMD-V if needed. */
    1414     VMMRZCallRing3SetNotification(pVCpu, hmR0CallRing3Callback, NULL /* pvUser */);
     1413    VMMR0AssertionSetNotification(pVCpu, hmR0AssertionCallback, NULL /*pvUser*/);
    14151414
    14161415    /* Reload host-state (back from ring-3/migrated CPUs) and shared guest/host bits. */
     
    14971496
    14981497    /* De-register the longjmp-to-ring 3 callback now that we have relinquished hardware resources. */
    1499     VMMRZCallRing3RemoveNotification(pVCpu);
     1498    VMMR0AssertionRemoveNotification(pVCpu);
    15001499    return VINF_SUCCESS;
    15011500}
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r91587 r92392  
    30433043
    30443044/**
    3045  * Does the necessary state syncing before doing a longjmp to ring-3.
    3046  *
    3047  * @returns VBox status code.
    3048  * @param   pVCpu       The cross context virtual CPU structure.
    3049  *
    3050  * @remarks No-long-jmp zone!!!
    3051  */
    3052 static int hmR0SvmLongJmpToRing3(PVMCPUCC pVCpu)
    3053 {
    3054     return hmR0SvmLeaveSession(pVCpu);
    3055 }
    3056 
    3057 
    3058 /**
    30593045 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
    3060  * any remaining host state) before we longjump to ring-3 and possibly get
    3061  * preempted.
     3046 * any remaining host state) before we go back to ring-3 due to an assertion.
    30623047 *
    30633048 * @param   pVCpu           The cross context virtual CPU structure.
    3064  * @param   enmOperation    The operation causing the ring-3 longjump.
    3065  */
    3066 VMMR0DECL(int) SVMR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation)
    3067 {
    3068     if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
    3069     {
    3070         /*
    3071          * !!! IMPORTANT !!!
    3072          * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() needs
    3073          * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.
    3074          */
    3075         VMMRZCallRing3RemoveNotification(pVCpu);
    3076         VMMRZCallRing3Disable(pVCpu);
    3077         HM_DISABLE_PREEMPT(pVCpu);
    3078 
    3079         /* Import the entire guest state. */
    3080         hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    3081 
    3082         /* Restore host FPU state if necessary and resync on next R0 reentry. */
    3083         CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
    3084 
    3085         /* Restore host debug registers if necessary and resync on next R0 reentry. */
    3086         CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
    3087 
    3088         /* Deregister the hook now that we've left HM context before re-enabling preemption. */
    3089         /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here!  */
    3090         VMMR0ThreadCtxHookDisable(pVCpu);
    3091 
    3092         /* Leave HM context. This takes care of local init (term). */
    3093         HMR0LeaveCpu(pVCpu);
    3094 
    3095         HM_RESTORE_PREEMPT();
    3096         return VINF_SUCCESS;
    3097     }
    3098 
    3099     Assert(pVCpu);
    3100     Assert(VMMRZCallRing3IsEnabled(pVCpu));
    3101     HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    3102 
     3049 */
     3050VMMR0DECL(int) SVMR0AssertionCallback(PVMCPUCC pVCpu)
     3051{
     3052    /*
     3053     * !!! IMPORTANT !!!
     3054     * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() needs
     3055     * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.
     3056     */
     3057    VMMR0AssertionRemoveNotification(pVCpu);
    31033058    VMMRZCallRing3Disable(pVCpu);
    3104 
    3105     Log4Func(("Calling hmR0SvmLongJmpToRing3\n"));
    3106     int rc = hmR0SvmLongJmpToRing3(pVCpu);
    3107     AssertRCReturn(rc, rc);
    3108 
    3109     VMMRZCallRing3Enable(pVCpu);
     3059    HM_DISABLE_PREEMPT(pVCpu);
     3060
     3061    /* Import the entire guest state. */
     3062    hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
     3063
     3064    /* Restore host FPU state if necessary and resync on next R0 reentry. */
     3065    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
     3066
     3067    /* Restore host debug registers if necessary and resync on next R0 reentry. */
     3068    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
     3069
     3070    /* Deregister the hook now that we've left HM context before re-enabling preemption. */
     3071    /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here!  */
     3072    VMMR0ThreadCtxHookDisable(pVCpu);
     3073
     3074    /* Leave HM context. This takes care of local init (term). */
     3075    HMR0LeaveCpu(pVCpu);
     3076
     3077    HM_RESTORE_PREEMPT();
    31103078    return VINF_SUCCESS;
    31113079}
     
    48144782    rc = hmR0SvmExitToRing3(pVCpu, rc);
    48154783    Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
    4816     Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
     4784    Assert(!VMMR0AssertionIsNotificationSet(pVCpu));
    48174785    return rc;
    48184786}
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h

    r87387 r92392  
    4141VMMR0DECL(int)          SVMR0Enter(PVMCPUCC pVCpu);
    4242VMMR0DECL(void)         SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit);
    43 VMMR0DECL(int)          SVMR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation);
     43VMMR0DECL(int)          SVMR0AssertionCallback(PVMCPUCC pVCpu);
    4444VMMR0DECL(int)          SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
    4545                                       bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r92216 r92392  
    86918691/**
    86928692 * VMMRZCallRing3() callback wrapper which saves the guest state before we
    8693  * longjump to ring-3 and possibly get preempted.
     8693 * longjump due to a ring-0 assertion.
    86948694 *
    86958695 * @returns VBox status code.
    86968696 * @param   pVCpu           The cross context virtual CPU structure.
    8697  * @param   enmOperation    The operation causing the ring-3 longjump.
    8698  */
    8699 VMMR0DECL(int) VMXR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation)
    8700 {
    8701     if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
    8702     {
    8703         /*
    8704          * !!! IMPORTANT !!!
    8705          * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
    8706          * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
    8707          */
    8708         VMMRZCallRing3RemoveNotification(pVCpu);
    8709         VMMRZCallRing3Disable(pVCpu);
    8710         HM_DISABLE_PREEMPT(pVCpu);
    8711 
    8712         PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    8713         hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    8714         CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
    8715         CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
    8716 
    8717         /* Restore host-state bits that VT-x only restores partially. */
    8718         if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
    8719             VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
    8720         pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
    8721 
    8722         /* Restore the lazy host MSRs as we're leaving VT-x context. */
    8723         if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
    8724             hmR0VmxLazyRestoreHostMsrs(pVCpu);
    8725 
    8726         /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
    8727         pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
    8728         VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    8729 
    8730         /* Clear the current VMCS data back to memory (shadow VMCS if any would have been
    8731            cleared as part of importing the guest state above). */
    8732         hmR0VmxClearVmcs(pVmcsInfo);
    8733 
    8734         /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here!  */
    8735         VMMR0ThreadCtxHookDisable(pVCpu);
    8736 
    8737         /* Leave HM context. This takes care of local init (term). */
    8738         HMR0LeaveCpu(pVCpu);
    8739         HM_RESTORE_PREEMPT();
    8740         return VINF_SUCCESS;
    8741     }
    8742 
    8743     Assert(pVCpu);
    8744     Assert(VMMRZCallRing3IsEnabled(pVCpu));
    8745     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    8746 
     8697 */
     8698VMMR0DECL(int) VMXR0AssertionCallback(PVMCPUCC pVCpu)
     8699{
     8700    /*
     8701     * !!! IMPORTANT !!!
     8702     * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
     8703     * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
     8704     */
     8705    VMMR0AssertionRemoveNotification(pVCpu);
    87478706    VMMRZCallRing3Disable(pVCpu);
    8748 
    8749     Log4Func(("-> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation));
    8750 
    8751     int rc = hmR0VmxLongJmpToRing3(pVCpu);
    8752     AssertRCReturn(rc, rc);
    8753 
    8754     VMMRZCallRing3Enable(pVCpu);
     8707    HM_DISABLE_PREEMPT(pVCpu);
     8708
     8709    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     8710    hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     8711    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
     8712    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
     8713
     8714    /* Restore host-state bits that VT-x only restores partially. */
     8715    if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
     8716        VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
     8717    pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
     8718
     8719    /* Restore the lazy host MSRs as we're leaving VT-x context. */
     8720    if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
     8721        hmR0VmxLazyRestoreHostMsrs(pVCpu);
     8722
     8723    /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
     8724    pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
     8725    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
     8726
     8727    /* Clear the current VMCS data back to memory (shadow VMCS if any would have been
     8728       cleared as part of importing the guest state above). */
     8729    hmR0VmxClearVmcs(pVmcsInfo);
     8730
     8731    /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here!  */
     8732    VMMR0ThreadCtxHookDisable(pVCpu);
     8733
     8734    /* Leave HM context. This takes care of local init (term). */
     8735    HMR0LeaveCpu(pVCpu);
     8736    HM_RESTORE_PREEMPT();
    87558737    return VINF_SUCCESS;
    87568738}
     
    1297012952    }
    1297112953    Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
    12972     Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
     12954    Assert(!VMMR0AssertionIsNotificationSet(pVCpu));
    1297312955    return rcStrict;
    1297412956}
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h

    r87412 r92392  
    3333VMMR0DECL(int)          VMXR0Enter(PVMCPUCC pVCpu);
    3434VMMR0DECL(void)         VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit);
    35 VMMR0DECL(int)          VMXR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation);
     35VMMR0DECL(int)          VMXR0AssertionCallback(PVMCPUCC pVCpu);
    3636VMMR0DECL(int)          VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
    3737                                       bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r92391 r92392  
    12911291            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
    12921292            break;
    1293         case VINF_VMM_CALL_HOST:
    1294             switch (pVCpu->vmm.s.enmCallRing3Operation)
    1295             {
    1296                 case VMMCALLRING3_VM_R0_ASSERTION:
    1297                 default:
    1298                     STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
    1299                     break;
    1300             }
    1301             break;
    13021293        case VINF_PATM_DUPLICATE_FUNCTION:
    13031294            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
     
    14581449                            if (RT_UNLIKELY(   VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
    14591450                                            && RT_SUCCESS_NP(rc)
    1460                                             && rc != VINF_VMM_CALL_HOST ))
     1451                                            && rc != VERR_VMM_RING0_ASSERTION ))
    14611452                            {
    14621453                                pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
     
    35403531*********************************************************************************************************************************/
    35413532
     3533/**
     3534 * Installs a notification callback for ring-0 assertions.
     3535 *
     3536 * @param   pVCpu         The cross context virtual CPU structure.
     3537 * @param   pfnCallback   Pointer to the callback.
     3538 * @param   pvUser        The user argument.
     3539 *
     3540 * @return VBox status code.
     3541 */
     3542VMMR0_INT_DECL(int) VMMR0AssertionSetNotification(PVMCPUCC pVCpu, PFNVMMR0ASSERTIONNOTIFICATION pfnCallback, RTR0PTR pvUser)
     3543{
     3544    AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
     3545    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
     3546
     3547    if (!pVCpu->vmm.s.pfnRing0AssertCallback)
     3548    {
     3549        pVCpu->vmm.s.pfnRing0AssertCallback    = pfnCallback;
     3550        pVCpu->vmm.s.pvRing0AssertCallbackUser = pvUser;
     3551        return VINF_SUCCESS;
     3552    }
     3553    return VERR_ALREADY_EXISTS;
     3554}
     3555
     3556
     3557/**
     3558 * Removes the ring-0 callback.
     3559 *
     3560 * @param   pVCpu   The cross context virtual CPU structure.
     3561 */
     3562VMMR0_INT_DECL(void) VMMR0AssertionRemoveNotification(PVMCPUCC pVCpu)
     3563{
     3564    pVCpu->vmm.s.pfnRing0AssertCallback    = NULL;
     3565    pVCpu->vmm.s.pvRing0AssertCallbackUser = NULL;
     3566}
     3567
     3568
     3569/**
     3570 * Checks whether there is a ring-0 callback notification active.
     3571 *
     3572 * @param   pVCpu   The cross context virtual CPU structure.
     3573 * @returns true if the notification is active, false otherwise.
     3574 */
     3575VMMR0_INT_DECL(bool) VMMR0AssertionIsNotificationSet(PVMCPUCC pVCpu)
     3576{
     3577    return pVCpu->vmm.s.pfnRing0AssertCallback != NULL;
     3578}
     3579
     3580
    35423581/*
    35433582 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
     
    35653604# endif
    35663605            {
    3567                 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
     3606                if (pVCpu->vmm.s.pfnRing0AssertCallback)
     3607                    pVCpu->vmm.s.pfnRing0AssertCallback(pVCpu, pVCpu->vmm.s.pvRing0AssertCallbackUser);
     3608                int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VERR_VMM_RING0_ASSERTION);
    35683609                return RT_FAILURE_NP(rc);
    35693610            }
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r92391 r92392  
    174174static VBOXSTRICTRC         vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
    175175                                                     uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
    176 static int                  vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu);
     176static int                  vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu);
    177177static FNRTTHREAD           vmmR3LogFlusher;
    178178static void                 vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf,
     
    429429    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest,      STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest",      STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
    430430    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR,            STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR",            STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
    431     STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3,           STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc",             STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
    432431
    433432    STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherFlushes,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-Flushes",  STAMUNIT_OCCURENCES, "Total number of buffer flushes");
     
    525524     * Call Ring-0 entry with init code.
    526525     */
    527     for (;;)
    528     {
    529526#ifdef NO_SUPCALLR0VMM
    530         //rc = VERR_GENERAL_FAILURE;
    531         rc = VINF_SUCCESS;
     527    //rc = VERR_GENERAL_FAILURE;
     528    rc = VINF_SUCCESS;
    532529#else
    533         rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
    534 #endif
    535         /*
    536          * Flush the logs.
    537          */
     530    rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
     531#endif
     532
     533    /*
     534     * Flush the logs & deal with assertions.
     535     */
    538536#ifdef LOG_ENABLED
    539         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
    540 #endif
    541         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
    542         if (rc != VINF_VMM_CALL_HOST)
    543             break;
    544         rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
    545         if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    546             break;
    547         /* Resume R0 */
    548     }
    549 
     537    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
     538#endif
     539    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
     540    if (rc == VERR_VMM_RING0_ASSERTION)
     541        rc = vmmR3HandleRing0Assert(pVM, pVCpu);
    550542    if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    551543    {
     
    555547    }
    556548
     549    /*
     550     * Log stuff we learned in ring-0.
     551     */
    557552    /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
    558553    if (pVM->vmm.s.fIsUsingContextHooks)
     
    657652     * Call Ring-0 entry with termination code.
    658653     */
    659     int rc;
    660     for (;;)
    661     {
    662654#ifdef NO_SUPCALLR0VMM
    663         //rc = VERR_GENERAL_FAILURE;
    664         rc = VINF_SUCCESS;
     655    //rc = VERR_GENERAL_FAILURE;
     656    int rc = VINF_SUCCESS;
    665657#else
    666         rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
    667 #endif
    668         /*
    669          * Flush the logs.
    670          */
     658    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
     659#endif
     660
     661    /*
     662     * Flush the logs & deal with assertions.
     663     */
    671664#ifdef LOG_ENABLED
    672         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
    673 #endif
    674         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
    675         if (rc != VINF_VMM_CALL_HOST)
    676             break;
    677         rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
    678         if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    679             break;
    680         /* Resume R0 */
    681     }
     665    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
     666#endif
     667    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
     668    if (rc == VERR_VMM_RING0_ASSERTION)
     669        rc = vmmR3HandleRing0Assert(pVM, pVCpu);
    682670    if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    683671    {
     
    687675    }
    688676
     677    /*
     678     * Do clean ups.
     679     */
    689680    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    690681    {
     
    12381229    Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
    12391230
    1240     for (;;)
    1241     {
    1242         int rc;
    1243         do
    1244         {
     1231    int rc;
     1232    do
     1233    {
    12451234#ifdef NO_SUPCALLR0VMM
    1246             rc = VERR_GENERAL_FAILURE;
     1235        rc = VERR_GENERAL_FAILURE;
    12471236#else
    1248             rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
    1249             if (RT_LIKELY(rc == VINF_SUCCESS))
    1250                 rc = pVCpu->vmm.s.iLastGZRc;
    1251 #endif
    1252         } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
     1237        rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
     1238        if (RT_LIKELY(rc == VINF_SUCCESS))
     1239            rc = pVCpu->vmm.s.iLastGZRc;
     1240#endif
     1241    } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
    12531242
    12541243#if 0 /** @todo triggers too often */
    1255         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
    1256 #endif
    1257 
    1258         /*
    1259          * Flush the logs
    1260          */
     1244    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
     1245#endif
     1246
     1247    /*
     1248     * Flush the logs
     1249     */
    12611250#ifdef LOG_ENABLED
    1262         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
    1263 #endif
    1264         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
    1265         if (rc != VINF_VMM_CALL_HOST)
    1266         {
    1267             Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
    1268             return rc;
    1269         }
    1270         rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
    1271         if (RT_FAILURE(rc))
    1272             return rc;
    1273         /* Resume R0 */
    1274     }
     1251    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
     1252#endif
     1253    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
     1254    if (rc != VERR_VMM_RING0_ASSERTION)
     1255    {
     1256        Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
     1257        return rc;
     1258    }
     1259    return vmmR3HandleRing0Assert(pVM, pVCpu);
    12751260}
    12761261
     
    12861271VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation)
    12871272{
    1288     for (;;)
    1289     {
    1290         VBOXSTRICTRC rcStrict;
    1291         do
    1292         {
     1273    VBOXSTRICTRC rcStrict;
     1274    do
     1275    {
    12931276#ifdef NO_SUPCALLR0VMM
    1294             rcStrict = VERR_GENERAL_FAILURE;
     1277        rcStrict = VERR_GENERAL_FAILURE;
    12951278#else
    1296             rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
    1297             if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1298                 rcStrict = pVCpu->vmm.s.iLastGZRc;
    1299 #endif
    1300         } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
    1301 
    1302         /*
    1303          * Flush the logs
    1304          */
     1279        rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
     1280        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     1281            rcStrict = pVCpu->vmm.s.iLastGZRc;
     1282#endif
     1283    } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
     1284
     1285    /*
     1286     * Flush the logs
     1287     */
    13051288#ifdef LOG_ENABLED
    1306         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
    1307 #endif
    1308         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
    1309         if (rcStrict != VINF_VMM_CALL_HOST)
    1310             return rcStrict;
    1311         int rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
    1312         if (RT_FAILURE(rc))
    1313             return rc;
    1314         /* Resume R0 */
    1315     }
     1289    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
     1290#endif
     1291    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
     1292    if (rcStrict != VERR_VMM_RING0_ASSERTION)
     1293        return rcStrict;
     1294    return vmmR3HandleRing0Assert(pVM, pVCpu);
    13161295}
    13171296
     
    24492428VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
    24502429{
    2451     int rc;
    2452     for (;;)
    2453     {
     2430    /*
     2431     * Call ring-0.
     2432     */
    24542433#ifdef NO_SUPCALLR0VMM
    2455         rc = VERR_GENERAL_FAILURE;
     2434    int rc = VERR_GENERAL_FAILURE;
    24562435#else
    2457         rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
    2458 #endif
    2459         /*
    2460          * Flush the logs.
    2461          */
     2436    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
     2437#endif
     2438
     2439    /*
     2440     * Flush the logs and deal with ring-0 assertions.
     2441     */
    24622442#ifdef LOG_ENABLED
    2463         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
    2464 #endif
    2465         VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
    2466         if (rc != VINF_VMM_CALL_HOST)
    2467             break;
    2468         rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
    2469         if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    2470             break;
    2471         /* Resume R0 */
    2472     }
    2473 
    2474     AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
    2475                           ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
    2476                           VERR_IPE_UNEXPECTED_INFO_STATUS);
    2477     return rc;
    2478 }
    2479 
    2480 
    2481 /**
    2482  * Service a call to the ring-3 host code.
     2443    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
     2444#endif
     2445    VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
     2446    if (rc != VERR_VMM_RING0_ASSERTION)
     2447    {
     2448        AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
     2449                              ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
     2450                              VERR_IPE_UNEXPECTED_INFO_STATUS);
     2451        return rc;
     2452    }
     2453    return vmmR3HandleRing0Assert(pVM, pVCpu);
     2454}
     2455
     2456
     2457/**
     2458 * Logs a ring-0 assertion ASAP after returning to ring-3.
    24832459 *
    24842460 * @returns VBox status code.
    2485  * @param   pVM     The cross context VM structure.
    2486  * @param   pVCpu   The cross context virtual CPU structure.
    2487  * @remarks Careful with critsects.
    2488  */
    2489 static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
    2490 {
    2491     /*
    2492      * We must also check for pending critsect exits or else we can deadlock
    2493      * when entering other critsects here.
    2494      */
    2495     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
    2496         PDMCritSectBothFF(pVM, pVCpu);
    2497 
    2498     switch (pVCpu->vmm.s.enmCallRing3Operation)
    2499     {
    2500         /*
    2501          * Signal a ring 0 hypervisor assertion.
    2502          * Cancel the longjmp operation that's in progress.
    2503          */
    2504         case VMMCALLRING3_VM_R0_ASSERTION:
    2505             pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
    2506             pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
     2461 * @param   pVM         The cross context VM structure.
     2462 * @param   pVCpu       The cross context virtual CPU structure.
     2463 */
     2464static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu)
     2465{
     2466    /*
     2467     * Signal a ring 0 hypervisor assertion.
     2468     * Cancel the longjmp operation that's in progress.
     2469     */
     2470    pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
    25072471#ifdef RT_ARCH_X86
    2508             pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
     2472    pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
    25092473#else
    2510             pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
     2474    pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
    25112475#endif
    25122476#ifdef VMM_R0_SWITCH_STACK
    2513             *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker  */
    2514 #endif
    2515             LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
    2516             LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
    2517             return VERR_VMM_RING0_ASSERTION;
    2518 
    2519         default:
    2520             AssertMsgFailed(("enmCallRing3Operation=%d\n", pVCpu->vmm.s.enmCallRing3Operation));
    2521             return VERR_VMM_UNKNOWN_RING3_CALL;
    2522     }
     2477    *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker  */
     2478#endif
     2479    LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
     2480    LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
     2481    return VERR_VMM_RING0_ASSERTION;
    25232482}
    25242483
  • trunk/src/VBox/VMM/VMMRZ/VMMRZ.cpp

    r90953 r92392  
    2929#include <iprt/asm-amd64-x86.h>
    3030#include <iprt/string.h>
    31 
    32 
    33 /**
    34  * Calls the ring-3 host code.
    35  *
    36  * @returns VBox status code of the ring-3 call.
    37  * @retval  VERR_VMM_RING3_CALL_DISABLED if called at the wrong time. This must
    38  *          be passed up the stack, or if that isn't possible then VMMRZCallRing3
    39  *          needs to change it into an assertion.
    40  *
    41  *
    42  * @param   pVM             The cross context VM structure.
    43  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    44  * @param   enmOperation    The operation.
    45  * @param   uArg            The argument to the operation.
    46  */
    47 VMMRZDECL(int) VMMRZCallRing3(PVMCC pVM, PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg)
    48 {
    49     VMCPU_ASSERT_EMT(pVCpu);
    50 
    51     /*
    52      * Check if calling ring-3 has been disabled and only let fatal calls thru.
    53      */
    54     if (RT_UNLIKELY(    pVCpu->vmm.s.cCallRing3Disabled != 0
    55                     &&  enmOperation != VMMCALLRING3_VM_R0_ASSERTION))
    56     {
    57 #ifndef IN_RING0
    58         /*
    59          * In most cases, it's sufficient to return a status code which
    60          * will then be propagated up the code usually encountering several
    61          * AssertRC invocations along the way. Hitting one of those is more
    62          * helpful than stopping here.
    63          *
    64          * However, some don't check the status code because they are called
    65          * from void functions, and for these we'll turn this into a ring-0
    66          * assertion host call.
    67          */
    68         if (enmOperation != VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS)
    69             return VERR_VMM_RING3_CALL_DISABLED;
    70 #endif
    71 #ifdef IN_RC
    72         RTStrPrintf(g_szRTAssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
    73                     "VMMRZCallRing3: enmOperation=%d uArg=%#llx idCpu=%#x cCallRing3Disabled=%#x\n",
    74                     enmOperation, uArg, pVCpu->idCpu, pVCpu->vmm.s.cCallRing3Disabled);
    75 #endif
    76         RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
    77                     "VMMRZCallRing3: enmOperation=%d uArg=%#llx idCpu=%#x cCallRing3Disabled=%#x\n",
    78                     enmOperation, uArg, pVCpu->idCpu, pVCpu->vmm.s.cCallRing3Disabled);
    79         enmOperation = VMMCALLRING3_VM_R0_ASSERTION;
    80     }
    81 
    82     /*
    83      * The normal path.
    84      */
    85 /** @todo profile this! */
    86     pVCpu->vmm.s.enmCallRing3Operation = enmOperation;
    87     pVCpu->vmm.s.u64CallRing3Arg = uArg;
    88     pVCpu->vmm.s.rcCallRing3 = VERR_VMM_RING3_CALL_NO_RC;
    89 #ifdef IN_RC
    90     pVM->vmm.s.pfnRCToHost(VINF_VMM_CALL_HOST);
    91 #else
    92     int rc;
    93     if (pVCpu->vmm.s.pfnCallRing3CallbackR0)
    94     {
    95         rc = pVCpu->vmm.s.pfnCallRing3CallbackR0(pVCpu, enmOperation, pVCpu->vmm.s.pvCallRing3CallbackUserR0);
    96         if (RT_FAILURE(rc))
    97             return rc;
    98     }
    99     rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
    100     if (RT_FAILURE(rc))
    101         return rc;
    102 #endif
    103     return pVCpu->vmm.s.rcCallRing3;
    104 }
    105 
    106 
    107 /**
    108  * Simple wrapper that adds the pVCpu argument.
    109  *
    110  * @returns VBox status code of the ring-3 call.
    111  * @retval  VERR_VMM_RING3_CALL_DISABLED if called at the wrong time. This must
    112  *          be passed up the stack, or if that isn't possible then VMMRZCallRing3
    113  *          needs to change it into an assertion.
    114  *
    115  * @param   pVM             The cross context VM structure.
    116  * @param   enmOperation    The operation.
    117  * @param   uArg            The argument to the operation.
    118  */
    119 VMMRZDECL(int) VMMRZCallRing3NoCpu(PVMCC pVM, VMMCALLRING3 enmOperation, uint64_t uArg)
    120 {
    121     return VMMRZCallRing3(pVM, VMMGetCpu(pVM), enmOperation, uArg);
    122 }
    12331
    12432
     
    194102}
    195103
    196 
    197 /**
    198  * Sets the ring-0 callback before doing the ring-3 call.
    199  *
    200  * @param   pVCpu         The cross context virtual CPU structure.
    201  * @param   pfnCallback   Pointer to the callback.
    202  * @param   pvUser        The user argument.
    203  *
    204  * @return VBox status code.
    205  */
    206 VMMRZDECL(int) VMMRZCallRing3SetNotification(PVMCPUCC pVCpu, R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallback, RTR0PTR pvUser)
    207 {
    208     AssertPtrReturn(pVCpu, VERR_INVALID_POINTER);
    209     AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
    210 
    211     if (pVCpu->vmm.s.pfnCallRing3CallbackR0)
    212         return VERR_ALREADY_EXISTS;
    213 
    214     pVCpu->vmm.s.pfnCallRing3CallbackR0    = pfnCallback;
    215     pVCpu->vmm.s.pvCallRing3CallbackUserR0 = pvUser;
    216     return VINF_SUCCESS;
    217 }
    218 
    219 
    220 /**
    221  * Removes the ring-0 callback.
    222  *
    223  * @param   pVCpu   The cross context virtual CPU structure.
    224  */
    225 VMMRZDECL(void) VMMRZCallRing3RemoveNotification(PVMCPUCC pVCpu)
    226 {
    227     pVCpu->vmm.s.pfnCallRing3CallbackR0 = NULL;
    228 }
    229 
    230 
    231 /**
    232  * Checks whether there is a ring-0 callback notification active.
    233  *
    234  * @param   pVCpu   The cross context virtual CPU structure.
    235  * @returns true if there the notification is active, false otherwise.
    236  */
    237 VMMRZDECL(bool) VMMRZCallRing3IsNotificationSet(PVMCPUCC pVCpu)
    238 {
    239     return pVCpu->vmm.s.pfnCallRing3CallbackR0 != NULL;
    240 }
    241 
  • trunk/src/VBox/VMM/include/VMMInternal.h

    r92391 r92392  
    408408    STAMCOUNTER                 StatRZRetTimerPending;
    409409    STAMCOUNTER                 StatRZRetInterruptPending;
    410     STAMCOUNTER                 StatRZRetCallRing3;
    411410    STAMCOUNTER                 StatRZRetPATMDuplicateFn;
    412411    STAMCOUNTER                 StatRZRetPGMChangeMode;
     
    466465    /** @} */
    467466
    468     /** Alignment padding, making sure u64CallRing3Arg and CallRing3JmpBufR0 are nicely aligned. */
    469     uint32_t                    au32Padding3[1];
    470 
    471467    /** @name Call Ring-3
    472468     * Formerly known as host calls.
     
    474470    /** The disable counter. */
    475471    uint32_t                    cCallRing3Disabled;
    476     /** The pending operation. */
    477     VMMCALLRING3                enmCallRing3Operation;
    478     /** The result of the last operation. */
    479     int32_t                     rcCallRing3;
    480     /** The argument to the operation. */
    481     uint64_t                    u64CallRing3Arg;
    482     /** The Ring-0 notification callback. */
    483     R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION)   pfnCallRing3CallbackR0;
    484     /** The Ring-0 notification callback user argument. */
    485     R0PTRTYPE(void *)           pvCallRing3CallbackUserR0;
     472    uint32_t                    u32Padding3;
     473    /** Ring-0 assertion notification callback. */
     474    R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnRing0AssertCallback;
     475    /** Argument for pfnRing0AssertCallback. */
     476    R0PTRTYPE(void *)           pvRing0AssertCallbackUser;
    486477    /** The Ring-0 jmp buffer.
    487478     * @remarks The size of this type isn't stable in assembly, so don't put
  • trunk/src/VBox/VMM/include/VMMInternal.mac

    r91751 r92392  
    127127        .TracerCtx              resb SUPDRVTRACERUSRCTX64_size
    128128
    129         .au32Padding3           resd 1
    130 
    131129        .cCallRing3Disabled     resd 1
    132         .enmCallRing3Operation  resd 1
    133         .rcCallRing3            resd 1
    134130        alignb 8
    135         .u64CallRing3Arg        resq 1
    136         .pfnCallRing3CallbackR0        RTR0PTR_RES 1
    137         .pvCallRing3CallbackUserR0      RTR0PTR_RES 1
    138         ; .CallRing3JmpBufR0    resb no-can-do
     131        .pfnRing0AssertCallback RTR0PTR_RES 1
     132        .pvRing0AssertCallbackUser RTR0PTR_RES 1
     133        alignb 16
     134        .CallRing3JmpBufR0      resb 1
    139135endstruc
    140136
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r91895 r92392  
    11861186    GEN_CHECK_OFF(VMMCPU, cCallRing3Disabled);
    11871187    GEN_CHECK_OFF(VMMCPU, enmCallRing3Operation);
    1188     GEN_CHECK_OFF(VMMCPU, rcCallRing3);
    1189     GEN_CHECK_OFF(VMMCPU, u64CallRing3Arg);
    11901188    GEN_CHECK_OFF(VMMCPU, CallRing3JmpBufR0);
    11911189    GEN_CHECK_OFF_DOT(VMMCPU, CallRing3JmpBufR0.SpCheck);
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r91306 r92392  
    265265    PVM pVM = NULL; NOREF(pVM);
    266266
    267     CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.u64CallRing3Arg, 8);
    268267#if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
    269268    CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallRing3JmpBufR0, 16);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette