Changeset 90945 in vbox
- Timestamp:
- Aug 27, 2021 10:48:54 AM (4 years ago)
- svn:sync-xref-src-repo-rev:
- 146583
- Location:
- trunk
- Files:
-
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/gvm.h
r90829 r90945 287 287 VMMR0PERVM s; 288 288 #endif 289 uint8_t padding[ 64];289 uint8_t padding[704]; 290 290 } vmmr0; 291 291 292 292 /** Padding so aCpus starts on a page boundrary. */ 293 293 #ifdef VBOX_WITH_NEM_R0 294 uint8_t abPadding2[4096*2 - 64 - 256 - 256 - 1024 - 256 - 64 - 2176 - 640 - 512 - 64 - 1024 - 128 - 64 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];294 uint8_t abPadding2[4096*2 - 64 - 256 - 256 - 1024 - 256 - 64 - 2176 - 640 - 512 - 64 - 1024 - 128 - 704 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT]; 295 295 #else 296 uint8_t abPadding2[4096*2 - 64 - 256 - 256 - 1024 - 64 - 2176 - 640 - 512 - 64 - 1024 - 128 - 64 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];296 uint8_t abPadding2[4096*2 - 64 - 256 - 256 - 1024 - 64 - 2176 - 640 - 512 - 64 - 1024 - 128 - 704 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT]; 297 297 #endif 298 298 -
trunk/include/VBox/vmm/vmm.h
r90897 r90945 75 75 /** Acquire the MM hypervisor heap lock. */ 76 76 VMMCALLRING3_MMHYPER_LOCK, 77 /** Flush the GC/R0 logger. */78 VMMCALLRING3_VMM_LOGGER_FLUSH,79 77 /** Set the VM error message. */ 80 78 VMMCALLRING3_VM_SET_ERROR, … … 299 297 /** Copy logger settings from userland, VMMR0UpdateLoggersReq(). */ 300 298 VMMR0_DO_VMMR0_UPDATE_LOGGERS, 299 /** Used by the log flusher, VMMR0LogFlusher. */ 300 VMMR0_DO_VMMR0_LOG_FLUSHER, 301 301 302 302 /** Setup hardware-assisted VM session. */ -
trunk/include/iprt/err.h
r90861 r90945 385 385 * (or should be) terminating. */ 386 386 #define VINF_THREAD_IS_TERMINATING (22424) 387 /** The thread is terminating. */ 388 #define VERR_THREAD_IS_TERMINATING (-22424) 387 389 /** @} */ 388 390 -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r90897 r90945 59 59 #include <iprt/mp.h> 60 60 #include <iprt/once.h> 61 #include <iprt/semaphore.h> 62 #include <iprt/spinlock.h> 61 63 #include <iprt/stdarg.h> 62 64 #include <iprt/string.h> … … 143 145 #endif 144 146 RT_C_DECLS_END 145 static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, bool fRelease); 146 static FNRTLOGFLUSH vmmR0LogFlush; 147 static FNRTLOGFLUSH vmmR0LogRelFlush; 147 static int vmmR0UpdateLoggers(PGVM pGVM, VMCPUID idCpu, PVMMR0UPDATELOGGERSREQ pReq, bool fRelease); 148 static int vmmR0LogFlusher(PGVM pGVM); 149 static int vmmR0InitLoggers(PGVM pGVM); 150 static void vmmR0CleanupLoggers(PGVM pGVM); 148 151 149 152 … … 364 367 365 368 /** 366 * Helper for vmmR0InitLoggers367 */368 static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared,369 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3)370 {371 pR0Log->BufDesc.u32Magic = RTLOGBUFFERDESC_MAGIC;372 pR0Log->BufDesc.uReserved = 0;373 pR0Log->BufDesc.cbBuf = cbBuf;374 pR0Log->BufDesc.offBuf = 0;375 pR0Log->BufDesc.pchBuf = pchBuf;376 pR0Log->BufDesc.pAux = &pShared->AuxDesc;377 378 pShared->AuxDesc.fFlushedIndicator = false;379 pShared->AuxDesc.afPadding[0] = 0;380 pShared->AuxDesc.afPadding[1] = 0;381 pShared->AuxDesc.afPadding[2] = 0;382 pShared->AuxDesc.offBuf = 0;383 pShared->pchBufR3 = pchBufR3;384 pShared->cbBuf = cbBuf;385 386 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;387 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? 
"VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED,388 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX,389 1 /*cBufDescs*/, &pR0Log->BufDesc, RTLOGDEST_DUMMY,390 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,391 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);392 if (RT_SUCCESS(rc))393 {394 PRTLOGGER pLogger = pR0Log->pLogger;395 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE;396 pLogger->u64UserValue2 = (uintptr_t)pGVCpu;397 pLogger->u64UserValue3 = (uintptr_t)pGVCpu;398 399 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush);400 if (RT_SUCCESS(rc))401 {402 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu);403 return VINF_SUCCESS;404 }405 406 RTLogDestroy(pLogger);407 }408 pR0Log->pLogger = NULL;409 return rc;410 }411 412 413 /**414 * Initializes one type of loggers for each EMT.415 */416 static int vmmR0InitLoggers(PGVM pGVM, bool fRelease, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)417 {418 /* Allocate buffers first. */419 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus, false /*fExecutable*/);420 if (RT_SUCCESS(rc))421 {422 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS);423 if (RT_SUCCESS(rc))424 {425 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj);426 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2);427 428 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj);429 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3);430 431 /* Initialize the per-CPU loggers. */432 for (uint32_t i = 0; i < pGVM->cCpus; i++)433 {434 PGVMCPU pGVCpu = &pGVM->aCpus[i];435 PVMMR0PERVCPULOGGER pR0Log = fRelease ? &pGVCpu->vmmr0.s.RelLogger : &pGVCpu->vmmr0.s.Logger;436 PVMMR3CPULOGGER pShared = fRelease ? 
&pGVCpu->vmm.s.RelLogger : &pGVCpu->vmm.s.Logger;437 rc = vmmR0InitLoggerOne(pGVCpu, fRelease, pR0Log, pShared, cbBuf, pchBuf + i * cbBuf, pchBufR3 + i * cbBuf);438 if (RT_FAILURE(rc))439 {440 pR0Log->pLogger = NULL;441 pShared->pchBufR3 = NIL_RTR3PTR;442 while (i-- > 0)443 {444 pGVCpu = &pGVM->aCpus[i];445 pR0Log = fRelease ? &pGVCpu->vmmr0.s.RelLogger : &pGVCpu->vmmr0.s.Logger;446 pShared = fRelease ? &pGVCpu->vmm.s.RelLogger : &pGVCpu->vmm.s.Logger;447 RTLogDestroy(pR0Log->pLogger);448 pR0Log->pLogger = NULL;449 pShared->pchBufR3 = NIL_RTR3PTR;450 }451 break;452 }453 }454 if (RT_SUCCESS(rc))455 return VINF_SUCCESS;456 457 /* Bail out. */458 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/);459 *phMapObj = NIL_RTR0MEMOBJ;460 }461 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/);462 *phMemObj = NIL_RTR0MEMOBJ;463 }464 return rc;465 }466 467 468 /**469 369 * Initializes VMM specific members when the GVM structure is created, 470 370 * allocating loggers and stuff. … … 478 378 VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM) 479 379 { 380 AssertCompile(sizeof(pGVM->vmmr0.s) <= sizeof(pGVM->vmmr0.padding)); 381 480 382 /* 481 383 * Initialize all members first. 
482 384 */ 483 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ; 484 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ; 485 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ; 486 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ; 487 pGVM->vmmr0.s.fCalledInitVm = false; 385 pGVM->vmmr0.s.fCalledInitVm = false; 386 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ; 387 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ; 388 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ; 389 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ; 390 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK; 391 pGVM->vmmr0.s.LogFlusher.hThread = NIL_RTNATIVETHREAD; 392 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT; 393 pGVM->vmmr0.s.LogFlusher.idxRingHead = 0; 394 pGVM->vmmr0.s.LogFlusher.idxRingTail = 0; 395 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false; 488 396 489 397 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++) … … 492 400 Assert(pGVCpu->idHostCpu == NIL_RTCPUID); 493 401 Assert(pGVCpu->iHostCpuSet == UINT32_MAX); 494 pGVCpu->vmmr0.s.fInHmContext = false; 495 pGVCpu->vmmr0.s.pPreemptState = NULL; 496 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK; 497 } 498 499 /* 500 * Create the ring-0 release loggers. 501 */ 502 int rc = vmmR0InitLoggers(pGVM, true /*fRelease*/, _8K, 503 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger); 504 505 #ifdef LOG_ENABLED 506 /* 507 * Create debug loggers. 508 */ 509 if (RT_SUCCESS(rc)) 510 rc = vmmR0InitLoggers(pGVM, false /*fRelease*/, _64K, &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger); 511 #endif 512 513 return rc; 402 pGVCpu->vmmr0.s.pPreemptState = NULL; 403 pGVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK; 404 pGVCpu->vmmr0.s.Logger.hEventFlushWait = NIL_RTSEMEVENT; 405 pGVCpu->vmmr0.s.RelLogger.hEventFlushWait = NIL_RTSEMEVENT; 406 } 407 408 /* 409 * Create the loggers. 
410 */ 411 return vmmR0InitLoggers(pGVM); 514 412 } 515 413 … … 761 659 */ 762 660 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession); 661 662 /* 663 * Start log flusher thread termination. 664 */ 665 ASMAtomicWriteBool(&pGVM->vmmr0.s.LogFlusher.fThreadShutdown, true); 666 if (pGVM->vmmr0.s.LogFlusher.hEvent != NIL_RTSEMEVENT) 667 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent); 668 763 669 return VINF_SUCCESS; 764 670 } … … 782 688 * context. */ 783 689 VMMR0ThreadCtxHookDestroyForEmt(pGVCpu); 784 785 /* Destroy the release logger. */ 786 RTLogDestroy(pGVCpu->vmmr0.s.RelLogger.pLogger); 787 pGVCpu->vmmr0.s.RelLogger.pLogger = NULL; 788 pGVCpu->vmm.s.RelLogger.pchBufR3 = NIL_RTR3PTR; 789 790 /* Destroy the regular logger. */ 791 RTLogDestroy(pGVCpu->vmmr0.s.Logger.pLogger); 792 pGVCpu->vmmr0.s.Logger.pLogger = NULL; 793 pGVCpu->vmm.s.Logger.pchBufR3 = NIL_RTR3PTR; 794 } 795 796 /* 797 * Free logger buffer memory. 798 */ 799 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/); 800 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ; 801 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/); 802 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ; 803 804 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/); 805 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ; 806 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/); 807 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ; 690 } 691 692 vmmR0CleanupLoggers(pGVM); 808 693 } 809 694 … … 1526 1411 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy); 1527 1412 break; 1528 case VMMCALLRING3_VMM_LOGGER_FLUSH:1529 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);1530 break;1531 1413 case VMMCALLRING3_VM_SET_ERROR: 1532 1414 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError); … … 1620 1502 for (;;) /* hlt loop */ 1621 1503 { 1504 /* 1505 * Disable ring-3 calls & blocking till we've successfully entered HM. 
1506 * Otherwise we sometimes end up blocking at the finall Log4 statement 1507 * in VMXR0Enter, while still in a somewhat inbetween state. 1508 */ 1509 VMMRZCallRing3Disable(pGVCpu); 1510 1622 1511 /* 1623 1512 * Disable preemption. … … 1674 1563 if (RT_SUCCESS(rc)) 1675 1564 { 1676 pGVCpu->vmmr0.s.fInHmContext = true;1677 1565 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM); 1678 1566 … … 1687 1575 RTThreadPreemptRestore(&PreemptState); 1688 1576 } 1577 VMMRZCallRing3Enable(pGVCpu); 1689 1578 1690 1579 /* … … 1719 1608 #endif 1720 1609 1721 pGVCpu->vmmr0.s.fInHmContext = false;1610 VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */ 1722 1611 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED); 1723 1612 } … … 1766 1655 vmmR0RecordRC(pGVM, pGVCpu, rc); 1767 1656 #endif 1657 VMMRZCallRing3Enable(pGVCpu); 1658 1768 1659 /* 1769 1660 * If this is a halt. … … 1791 1682 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID); 1792 1683 RTThreadPreemptRestore(&PreemptState); 1684 1685 VMMRZCallRing3Enable(pGVCpu); 1686 1793 1687 if (iHostCpuSet < RTCPUSET_MAX_CPUS) 1794 1688 { … … 1805 1699 } 1806 1700 break; 1807 1808 1701 } /* halt loop. */ 1809 1702 break; … … 2072 1965 if (u64Arg <= 1 && pReqHdr != NULL) 2073 1966 rc = vmmR0UpdateLoggers(pGVM, idCpu /*idCpu*/, (PVMMR0UPDATELOGGERSREQ)pReqHdr, u64Arg != 0); 1967 else 1968 return VERR_INVALID_PARAMETER; 1969 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1970 break; 1971 1972 /* 1973 * Log flusher thread. 
1974 */ 1975 case VMMR0_DO_VMMR0_LOG_FLUSHER: 1976 if (idCpu != NIL_VMCPUID) 1977 return VERR_INVALID_CPU_ID; 1978 if (pReqHdr == NULL) 1979 rc = vmmR0LogFlusher(pGVM); 2074 1980 else 2075 1981 return VERR_INVALID_PARAMETER; … … 2756 2662 2757 2663 2664 /********************************************************************************************************************************* 2665 * EMT Blocking * 2666 *********************************************************************************************************************************/ 2667 2758 2668 /** 2759 2669 * Checks whether we've armed the ring-0 long jump machinery. … … 2819 2729 { 2820 2730 Assert(pVCpu->iHostCpuSet != UINT32_MAX); 2821 Assert(pVCpu->vmmr0.s.fInHmContext);2822 2731 2823 2732 if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK) … … 2884 2793 } 2885 2794 2886 2887 /** 2888 * Updates the EMT loggers for the VM. 2795 /** @name VMMR0EMTWAIT_F_XXX - flags for VMMR0EmtWaitEventInner and friends. 2796 * @{ */ 2797 /** Try suppress VERR_INTERRUPTED for a little while (~10 sec). */ 2798 #define VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED RT_BIT_32(0) 2799 /** @} */ 2800 2801 /** 2802 * Helper for waiting on an RTSEMEVENT, caller did VMMR0EmtPrepareToBlock. 2803 * 2804 * @returns 2805 * @retval VERR_THREAD_IS_TERMINATING 2806 * @retval VERR_TIMEOUT if we ended up waiting too long, either according to 2807 * @a cMsTimeout or to maximum wait values. 2808 * 2809 * @param pGVCpu The ring-0 virtual CPU structure. 2810 * @param fFlags VMMR0EMTWAIT_F_XXX. 2811 * @param hEvent The event to wait on. 2812 * @param cMsTimeout The timeout or RT_INDEFINITE_WAIT. 2813 */ 2814 VMMR0DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout) 2815 { 2816 AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT); 2817 2818 /* 2819 * Note! Similar code is found in the PDM critical sections too. 
2820 */ 2821 uint64_t const nsStart = RTTimeNanoTS(); 2822 uint64_t cNsMaxTotal = cMsTimeout == RT_INDEFINITE_WAIT 2823 ? RT_NS_5MIN : RT_MIN(RT_NS_5MIN, RT_NS_1MS_64 * cMsTimeout); 2824 uint32_t cMsMaxOne = RT_MS_5SEC; 2825 bool fNonInterruptible = false; 2826 for (;;) 2827 { 2828 /* Wait. */ 2829 int rcWait = !fNonInterruptible 2830 ? RTSemEventWaitNoResume(hEvent, cMsMaxOne) 2831 : RTSemEventWait(hEvent, cMsMaxOne); 2832 if (RT_SUCCESS(rcWait)) 2833 return rcWait; 2834 2835 if (rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED) 2836 { 2837 uint64_t const cNsElapsed = RTTimeNanoTS() - nsStart; 2838 2839 /* 2840 * Check the thread termination status. 2841 */ 2842 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD); 2843 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING, 2844 ("rcTerm=%Rrc\n", rcTerm)); 2845 if ( rcTerm == VERR_NOT_SUPPORTED 2846 && !fNonInterruptible 2847 && cNsMaxTotal > RT_NS_1MIN) 2848 cNsMaxTotal = RT_NS_1MIN; 2849 2850 /* We return immediately if it looks like the thread is terminating. */ 2851 if (rcTerm == VINF_THREAD_IS_TERMINATING) 2852 return VERR_THREAD_IS_TERMINATING; 2853 2854 /* We may suppress VERR_INTERRUPTED if VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED was 2855 specified, otherwise we'll just return it. */ 2856 if (rcWait == VERR_INTERRUPTED) 2857 { 2858 if (!(fFlags & VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED)) 2859 return VERR_INTERRUPTED; 2860 if (!fNonInterruptible) 2861 { 2862 /* First time: Adjust down the wait parameters and make sure we get at least 2863 one non-interruptible wait before timing out. */ 2864 fNonInterruptible = true; 2865 cMsMaxOne = 32; 2866 uint64_t const cNsLeft = cNsMaxTotal - cNsElapsed; 2867 if (cNsLeft > RT_NS_10SEC) 2868 cNsMaxTotal = cNsElapsed + RT_NS_10SEC; 2869 continue; 2870 } 2871 } 2872 2873 /* Check for timeout. 
*/ 2874 if (cNsElapsed > cNsMaxTotal) 2875 return VERR_TIMEOUT; 2876 } 2877 else 2878 return rcWait; 2879 } 2880 /* not reached */ 2881 } 2882 2883 2884 /********************************************************************************************************************************* 2885 * Logging. * 2886 *********************************************************************************************************************************/ 2887 2888 /** 2889 * VMMR0_DO_VMMR0_UPDATE_LOGGERS: Updates the EMT loggers for the VM. 2889 2890 * 2890 2891 * @returns VBox status code. … … 2937 2938 2938 2939 /** 2940 * VMMR0_DO_VMMR0_LOG_FLUSHER: Get the next log flushing job. 2941 * 2942 * The job info is copied into VMM::LogFlusherItem. 2943 * 2944 * @returns VBox status code. 2945 * @retval VERR_OBJECT_DESTROYED if we're shutting down. 2946 * @retval VERR_NOT_OWNER if the calling thread is not the flusher thread. 2947 * @param pGVM The global (ring-0) VM structure. 2948 * @thread The log flusher thread (first caller automatically becomes the log 2949 * flusher). 2950 */ 2951 static int vmmR0LogFlusher(PGVM pGVM) 2952 { 2953 /* 2954 * Check that this really is the flusher thread. 2955 */ 2956 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf(); 2957 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3); 2958 if (RT_LIKELY(pGVM->vmmr0.s.LogFlusher.hThread == hNativeSelf)) 2959 { /* likely */ } 2960 else 2961 { 2962 /* The first caller becomes the flusher thread. */ 2963 bool fOk; 2964 ASMAtomicCmpXchgHandle(&pGVM->vmmr0.s.LogFlusher.hThread, hNativeSelf, NIL_RTNATIVETHREAD, fOk); 2965 if (!fOk) 2966 return VERR_NOT_OWNER; 2967 pGVM->vmmr0.s.LogFlusher.fThreadRunning = true; 2968 } 2969 2970 /* 2971 * Acknowledge flush, waking up waiting EMT. 
2972 */ 2973 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock); 2974 2975 uint32_t idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 2976 uint32_t idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 2977 if ( idxTail != idxHead 2978 && pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing) 2979 { 2980 /* Pop the head off the ring buffer. */ 2981 uint32_t const idCpu = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idCpu; 2982 uint32_t const idxLogger = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxLogger; 2983 uint32_t const idxBuffer = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.idxBuffer; 2984 2985 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32 = UINT32_MAX >> 1; /* invalidate the entry */ 2986 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 2987 2988 /* Do the waking up. */ 2989 if ( idCpu < pGVM->cCpus 2990 && idxLogger < 2 2991 && idxBuffer < 1) 2992 { 2993 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 2994 PVMMR0PERVCPULOGGER pR0Log = idxLogger == 1 ? &pGVCpu->vmmr0.s.Logger : &pGVCpu->vmmr0.s.RelLogger; 2995 pR0Log->fFlushDone = true; 2996 2997 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 2998 2999 int rc = RTSemEventSignal(pR0Log->hEventFlushWait); 3000 if (RT_FAILURE(rc)) 3001 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n", 3002 idxHead, idCpu, idxLogger, idxBuffer, rc)); 3003 } 3004 else 3005 { 3006 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3007 LogRelMax(64, ("vmmR0LogFlusher: Bad ACK entry #%u: %u/%u/%u!\n", idxHead, idCpu, idxLogger, idxBuffer)); 3008 } 3009 3010 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3011 } 3012 3013 /* 3014 * The wait loop. 3015 */ 3016 int rc; 3017 for (;;) 3018 { 3019 /* 3020 * Work pending? 
3021 */ 3022 idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3023 idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3024 if (idxTail != idxHead) 3025 { 3026 pGVM->vmmr0.s.LogFlusher.aRing[idxHead].s.fProcessing = true; 3027 pGVM->vmm.s.LogFlusherItem.u32 = pGVM->vmmr0.s.LogFlusher.aRing[idxHead].u32; 3028 3029 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3030 return VINF_SUCCESS; 3031 } 3032 3033 /* 3034 * Nothing to do, so, check for termination and go to sleep. 3035 */ 3036 if (!pGVM->vmmr0.s.LogFlusher.fThreadShutdown) 3037 { /* likely */ } 3038 else 3039 { 3040 rc = VERR_OBJECT_DESTROYED; 3041 break; 3042 } 3043 3044 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = true; 3045 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3046 3047 rc = RTSemEventWaitNoResume(pGVM->vmmr0.s.LogFlusher.hEvent, RT_MS_5MIN); 3048 3049 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3050 pGVM->vmmr0.s.LogFlusher.fThreadWaiting = false; 3051 3052 if (RT_SUCCESS(rc) || rc == VERR_TIMEOUT) 3053 { /* likely */ } 3054 else if (rc == VERR_INTERRUPTED) 3055 { 3056 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3057 return rc; 3058 } 3059 else if (rc == VERR_SEM_DESTROYED || rc == VERR_INVALID_HANDLE) 3060 break; 3061 else 3062 { 3063 LogRel(("vmmR0LogFlusher: RTSemEventWaitNoResume returned unexpected status %Rrc\n", rc)); 3064 break; 3065 } 3066 } 3067 3068 /* 3069 * Terminating - prevent further calls and indicate to the EMTs that we're no longer around. 3070 */ 3071 pGVM->vmmr0.s.LogFlusher.hThread = ~pGVM->vmmr0.s.LogFlusher.hThread; /* (should be reasonably safe) */ 3072 pGVM->vmmr0.s.LogFlusher.fThreadRunning = false; 3073 3074 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3075 return rc; 3076 } 3077 3078 3079 /** 2939 3080 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush. 
2940 3081 */ … … 2953 3094 && pLogger->u64UserValue2 == pLogger->u64UserValue3) 2954 3095 { 2955 if (!(pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED)) 3096 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2; 3097 if ( RT_VALID_PTR(pGVCpu) 3098 && ((uintptr_t)pGVCpu & PAGE_OFFSET_MASK) == 0) 2956 3099 { 2957 PGVMCPU const pGVCpu = (PGVMCPU)(uintptr_t)pLogger->u64UserValue2; 2958 if ( RT_VALID_PTR(pGVCpu) 2959 && ((uintptr_t)pGVCpu & PAGE_OFFSET_MASK) == 0) 3100 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf(); 3101 PGVM const pGVM = pGVCpu->pGVM; 3102 if ( hNativeSelf == pGVCpu->hEMT 3103 && RT_VALID_PTR(pGVM)) 2960 3104 { 2961 RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf(); 2962 if ( hNativeSelf == pGVCpu->hEMT 2963 && RT_VALID_PTR(pGVCpu->pGVM)) 3105 PVMMR0PERVCPULOGGER const pR0Log = !fRelease ? &pGVCpu->vmmr0.s.Logger : &pGVCpu->vmmr0.s.RelLogger; 3106 PVMMR3CPULOGGER const pShared = !fRelease ? &pGVCpu->vmm.s.Logger : &pGVCpu->vmm.s.RelLogger; 3107 3108 /* 3109 * Can we wait on the log flusher to do the work? 3110 */ 3111 if ( VMMRZCallRing3IsEnabled(pGVCpu) 3112 && !(pLogger->u32UserValue1 & VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED)) 2964 3113 { 2965 3114 /* 2966 * Check that the jump buffer is armed. 3115 * Make sure we don't recurse forever here should something in the 3116 * following code trigger logging or an assertion. 2967 3117 */ 2968 #ifdef RT_ARCH_X86 2969 if ( pGVCpu->vmm.s.CallRing3JmpBufR0.eip != 0 2970 && !pGVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call) 2971 #else 2972 if ( pGVCpu->vmm.s.CallRing3JmpBufR0.rip != 0 2973 && !pGVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call) 2974 #endif 3118 if (!pR0Log->fFlushing) 2975 3119 { 2976 VMMRZCallRing3(pGVCpu->pGVM, pGVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, fRelease); 3120 pR0Log->fFlushing = true; 3121 3122 /* 3123 * Prepare to block waiting for the flusher thread. 
3124 */ 3125 VMMR0EMTBLOCKCTX Ctx; 3126 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushCommon", 3127 pR0Log->hEventFlushWait, &Ctx); 3128 if (RT_SUCCESS(rc)) 3129 { 3130 /* 3131 * Queue the flush job. 3132 */ 3133 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3134 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning) 3135 { 3136 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead 3137 % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3138 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail 3139 % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3140 uint32_t const idxNewTail = (idxTail + 1) 3141 % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3142 if (idxNewTail != idxHead) 3143 { 3144 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu; 3145 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = !fRelease; 3146 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = 0; 3147 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0; 3148 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail; 3149 3150 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting) 3151 { 3152 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3153 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent); 3154 } 3155 else 3156 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3157 3158 /* 3159 * Wait for it to complete. 3160 */ 3161 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED, 3162 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT); 3163 } 3164 else 3165 { 3166 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3167 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n")); 3168 } 3169 } 3170 else 3171 { 3172 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3173 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", 3174 pBufDesc->offBuf)); 3175 } 3176 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx); 3177 } 3178 else 3179 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! 
rc=%d\n", rc)); 3180 pR0Log->fFlushing = false; 2977 3181 } 2978 #ifdef DEBUG 2979 else SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n"); 2980 #endif 3182 else 3183 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n")); 2981 3184 } 2982 #ifdef DEBUG 2983 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n", 2984 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf); 2985 #endif 2986 3185 else 3186 { 3187 #if defined(RT_OS_LINUX) 3188 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", pBufDesc->offBuf)); 3189 #endif 3190 pShared->cbDropped += pBufDesc->offBuf; 3191 } 2987 3192 } 2988 #ifdef DEBUG 2989 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu);2990 #endif 3193 else 3194 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p hEMT=%p hNativeSelf=%p!\n", 3195 pLogger, pGVCpu, pGVCpu->hEMT, hNativeSelf)); 2991 3196 } 2992 /* else: quiet */ 2993 } 2994 #ifdef DEBUG 2995 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n", 2996 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3); 2997 #endif 2998 } 2999 #ifdef DEBUG 3000 else SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger); 3001 #endif 3197 else 3198 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p pGVCpu=%p!\n", pLogger, pGVCpu)); 3199 } 3200 else 3201 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p u32Magic=%#x u32UserValue1=%#x u64UserValue2=%#RX64 u64UserValue3=%#RX64!\n", 3202 pLogger, pLogger->u32Magic, pLogger->u32UserValue1, pLogger->u64UserValue2, pLogger->u64UserValue3)); 3203 } 3204 else 3205 SUP_DPRINTF(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger)); 3002 3206 return true; 3003 3207 } … … 3085 3289 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu) 3086 3290 { 3087 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled)) 3088 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup); 3291 if (!pGVCpu->vmmr0.s.Logger.fFlushing) 
3292 { 3293 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled)) 3294 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup); 3295 return NULL; 3296 } 3297 3298 /* 3299 * When we're flushing we _must_ return NULL here to suppress any 3300 * attempts at using the logger while in vmmR0LoggerFlushCommon. 3301 * The VMMR0EmtPrepareToBlock code may trigger logging in HM, 3302 * which will reset the buffer content before we even get to queue 3303 * the flush request. (Only an issue when VBOX_WITH_R0_LOGGING 3304 * is enabled.) 3305 */ 3089 3306 return NULL; 3090 3307 } … … 3110 3327 && pLogger->u64UserValue3 == (uintptr_t)pGVCpu) 3111 3328 { 3112 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled)) 3113 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup); 3114 return NULL; 3329 if (!pGVCpu->vmmr0.s.RelLogger.fFlushing) 3330 { 3331 if (!(pGVCpu->vmmr0.s.fLogFlushingDisabled)) 3332 return RTLogCheckGroupFlags(pLogger, fFlagsAndGroup); 3333 return NULL; 3334 } 3115 3335 } 3116 3336 } … … 3119 3339 } 3120 3340 3341 3342 /** 3343 * Helper for vmmR0InitLoggerSet 3344 */ 3345 static int vmmR0InitLoggerOne(PGVMCPU pGVCpu, bool fRelease, PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared, 3346 uint32_t cbBuf, char *pchBuf, RTR3PTR pchBufR3) 3347 { 3348 /* 3349 * Create and configure the logger. 3350 */ 3351 pR0Log->BufDesc.u32Magic = RTLOGBUFFERDESC_MAGIC; 3352 pR0Log->BufDesc.uReserved = 0; 3353 pR0Log->BufDesc.cbBuf = cbBuf; 3354 pR0Log->BufDesc.offBuf = 0; 3355 pR0Log->BufDesc.pchBuf = pchBuf; 3356 pR0Log->BufDesc.pAux = &pShared->AuxDesc; 3357 3358 pShared->AuxDesc.fFlushedIndicator = false; 3359 pShared->AuxDesc.afPadding[0] = 0; 3360 pShared->AuxDesc.afPadding[1] = 0; 3361 pShared->AuxDesc.afPadding[2] = 0; 3362 pShared->AuxDesc.offBuf = 0; 3363 pShared->pchBufR3 = pchBufR3; 3364 pShared->cbBuf = cbBuf; 3365 3366 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES; 3367 int rc = RTLogCreateEx(&pR0Log->pLogger, fRelease ? 
"VBOX_RELEASE_LOG" : "VBOX_LOG", RTLOG_F_NO_LOCKING | RTLOGFLAGS_BUFFERED, 3368 "all", RT_ELEMENTS(s_apszGroups), s_apszGroups, UINT32_MAX, 3369 1 /*cBufDescs*/, &pR0Log->BufDesc, RTLOGDEST_DUMMY, 3370 NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/, 3371 NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/); 3372 if (RT_SUCCESS(rc)) 3373 { 3374 PRTLOGGER pLogger = pR0Log->pLogger; 3375 pLogger->u32UserValue1 = VMMR0_LOGGER_FLAGS_MAGIC_VALUE; 3376 pLogger->u64UserValue2 = (uintptr_t)pGVCpu; 3377 pLogger->u64UserValue3 = (uintptr_t)pGVCpu; 3378 3379 rc = RTLogSetFlushCallback(pLogger, fRelease ? vmmR0LogRelFlush : vmmR0LogFlush); 3380 if (RT_SUCCESS(rc)) 3381 { 3382 RTLogSetR0ThreadNameF(pLogger, "EMT-%u-R0", pGVCpu->idCpu); 3383 3384 /* 3385 * Create the event sem the EMT waits on while flushing is happening. 3386 */ 3387 rc = RTSemEventCreate(&pR0Log->hEventFlushWait); 3388 if (RT_SUCCESS(rc)) 3389 return VINF_SUCCESS; 3390 pR0Log->hEventFlushWait = NIL_RTSEMEVENT; 3391 } 3392 RTLogDestroy(pLogger); 3393 } 3394 pR0Log->pLogger = NULL; 3395 return rc; 3396 } 3397 3398 3399 /** 3400 * Worker for VMMR0CleanupVM and vmmR0InitLoggerSet that destroys one logger. 3401 */ 3402 static void vmmR0TermLoggerOne(PVMMR0PERVCPULOGGER pR0Log, PVMMR3CPULOGGER pShared) 3403 { 3404 RTLogDestroy(pR0Log->pLogger); 3405 pR0Log->pLogger = NULL; 3406 pShared->pchBufR3 = NIL_RTR3PTR; 3407 3408 RTSemEventDestroy(pR0Log->hEventFlushWait); 3409 pR0Log->hEventFlushWait = NIL_RTSEMEVENT; 3410 } 3411 3412 3413 /** 3414 * Initializes one type of loggers for each EMT. 3415 */ 3416 static int vmmR0InitLoggerSet(PGVM pGVM, bool fRelease, uint32_t cbBuf, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj) 3417 { 3418 /* Allocate buffers first. 
*/ 3419 int rc = RTR0MemObjAllocPage(phMemObj, cbBuf * pGVM->cCpus, false /*fExecutable*/); 3420 if (RT_SUCCESS(rc)) 3421 { 3422 rc = RTR0MemObjMapUser(phMapObj, *phMemObj, (RTR3PTR)-1, 0 /*uAlignment*/, RTMEM_PROT_READ, NIL_RTR0PROCESS); 3423 if (RT_SUCCESS(rc)) 3424 { 3425 char * const pchBuf = (char *)RTR0MemObjAddress(*phMemObj); 3426 AssertPtrReturn(pchBuf, VERR_INTERNAL_ERROR_2); 3427 3428 RTR3PTR const pchBufR3 = RTR0MemObjAddressR3(*phMapObj); 3429 AssertReturn(pchBufR3 != NIL_RTR3PTR, VERR_INTERNAL_ERROR_3); 3430 3431 /* Initialize the per-CPU loggers. */ 3432 for (uint32_t i = 0; i < pGVM->cCpus; i++) 3433 { 3434 PGVMCPU pGVCpu = &pGVM->aCpus[i]; 3435 PVMMR0PERVCPULOGGER pR0Log = fRelease ? &pGVCpu->vmmr0.s.RelLogger : &pGVCpu->vmmr0.s.Logger; 3436 PVMMR3CPULOGGER pShared = fRelease ? &pGVCpu->vmm.s.RelLogger : &pGVCpu->vmm.s.Logger; 3437 rc = vmmR0InitLoggerOne(pGVCpu, fRelease, pR0Log, pShared, cbBuf, pchBuf + i * cbBuf, pchBufR3 + i * cbBuf); 3438 if (RT_FAILURE(rc)) 3439 { 3440 pR0Log->pLogger = NULL; 3441 pShared->pchBufR3 = NIL_RTR3PTR; 3442 while (i-- > 0) 3443 { 3444 pGVCpu = &pGVM->aCpus[i]; 3445 vmmR0TermLoggerOne(fRelease ? &pGVCpu->vmmr0.s.RelLogger : &pGVCpu->vmmr0.s.Logger, 3446 fRelease ? &pGVCpu->vmm.s.RelLogger : &pGVCpu->vmm.s.Logger); 3447 } 3448 break; 3449 } 3450 } 3451 if (RT_SUCCESS(rc)) 3452 return VINF_SUCCESS; 3453 3454 /* Bail out. */ 3455 RTR0MemObjFree(*phMapObj, false /*fFreeMappings*/); 3456 *phMapObj = NIL_RTR0MEMOBJ; 3457 } 3458 RTR0MemObjFree(*phMemObj, true /*fFreeMappings*/); 3459 *phMemObj = NIL_RTR0MEMOBJ; 3460 } 3461 return rc; 3462 } 3463 3464 3465 /** 3466 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff. 3467 * 3468 * @returns VBox status code. 3469 * @param pGVM The global (ring-0) VM structure. 3470 */ 3471 static int vmmR0InitLoggers(PGVM pGVM) 3472 { 3473 /* 3474 * Invalidate the ring buffer (not really necessary). 
3475 */ 3476 for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); idx++) 3477 pGVM->vmmr0.s.LogFlusher.aRing[idx].u32 = UINT32_MAX >> 1; /* (all bits except fProcessing set) */ 3478 3479 /* 3480 * Create the spinlock and flusher event semaphore. 3481 */ 3482 int rc = RTSpinlockCreate(&pGVM->vmmr0.s.LogFlusher.hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VM-Log-Flusher"); 3483 if (RT_SUCCESS(rc)) 3484 { 3485 rc = RTSemEventCreate(&pGVM->vmmr0.s.LogFlusher.hEvent); 3486 if (RT_SUCCESS(rc)) 3487 { 3488 /* 3489 * Create the ring-0 release loggers. 3490 */ 3491 rc = vmmR0InitLoggerSet(pGVM, true /*fRelease*/, _8K, 3492 &pGVM->vmmr0.s.hMemObjReleaseLogger, &pGVM->vmmr0.s.hMapObjReleaseLogger); 3493 #ifdef LOG_ENABLED 3494 if (RT_SUCCESS(rc)) 3495 { 3496 /* 3497 * Create debug loggers. 3498 */ 3499 rc = vmmR0InitLoggerSet(pGVM, false /*fRelease*/, _64K, 3500 &pGVM->vmmr0.s.hMemObjLogger, &pGVM->vmmr0.s.hMapObjLogger); 3501 } 3502 #endif 3503 } 3504 } 3505 return rc; 3506 } 3507 3508 3509 /** 3510 * Worker for VMMR0InitPerVMData that initializes all the logging related stuff. 3511 * 3512 * @param pGVM The global (ring-0) VM structure. 3513 */ 3514 static void vmmR0CleanupLoggers(PGVM pGVM) 3515 { 3516 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++) 3517 { 3518 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 3519 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.RelLogger, &pGVCpu->vmm.s.RelLogger); 3520 vmmR0TermLoggerOne(&pGVCpu->vmmr0.s.Logger, &pGVCpu->vmm.s.Logger); 3521 } 3522 3523 /* 3524 * Free logger buffer memory. 
3525 */ 3526 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjReleaseLogger, false /*fFreeMappings*/); 3527 pGVM->vmmr0.s.hMapObjReleaseLogger = NIL_RTR0MEMOBJ; 3528 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjReleaseLogger, true /*fFreeMappings*/); 3529 pGVM->vmmr0.s.hMemObjReleaseLogger = NIL_RTR0MEMOBJ; 3530 3531 RTR0MemObjFree(pGVM->vmmr0.s.hMapObjLogger, false /*fFreeMappings*/); 3532 pGVM->vmmr0.s.hMapObjLogger = NIL_RTR0MEMOBJ; 3533 RTR0MemObjFree(pGVM->vmmr0.s.hMemObjLogger, true /*fFreeMappings*/); 3534 pGVM->vmmr0.s.hMemObjLogger = NIL_RTR0MEMOBJ; 3535 3536 /* 3537 * Free log flusher related stuff. 3538 */ 3539 RTSpinlockDestroy(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3540 pGVM->vmmr0.s.LogFlusher.hSpinlock = NIL_RTSPINLOCK; 3541 RTSemEventDestroy(pGVM->vmmr0.s.LogFlusher.hEvent); 3542 pGVM->vmmr0.s.LogFlusher.hEvent = NIL_RTSEMEVENT; 3543 } 3544 3545 3546 /********************************************************************************************************************************* 3547 * Assertions * 3548 *********************************************************************************************************************************/ 3121 3549 3122 3550 /* … … 3165 3593 { 3166 3594 /* 3595 * To host kernel log/whatever. 3596 */ 3597 SUPR0Printf("!!R0-Assertion Failed!!\n" 3598 "Expression: %s\n" 3599 "Location : %s(%d) %s\n", 3600 pszExpr, pszFile, uLine, pszFunction); 3601 3602 /* 3167 3603 * To the log. 3168 3604 */ … … 3217 3653 * Push the message to the loggers. 3218 3654 */ 3219 PRTLOGGER pLog = RTLog GetDefaultInstance(); /* Don't initialize it here... */3655 PRTLOGGER pLog = RTLogRelGetDefaultInstance(); 3220 3656 if (pLog) 3221 3657 { … … 3224 3660 va_end(vaCopy); 3225 3661 } 3226 pLog = RTLog RelGetDefaultInstance();3662 pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */ 3227 3663 if (pLog) 3228 3664 { -
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r90862 r90945 176 176 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser); 177 177 static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu); 178 static FNRTTHREAD vmmR3LogFlusher; 178 179 static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 179 180 … … 293 294 { 294 295 /* 295 * Debug info and statistics.296 * Start the log flusher thread. 296 297 */ 297 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF); 298 vmmR3InitRegisterStats(pVM); 299 vmmInitFormatTypes(); 300 301 return VINF_SUCCESS; 298 rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/, 299 RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk"); 300 if (RT_SUCCESS(rc)) 301 { 302 303 /* 304 * Debug info and statistics. 305 */ 306 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF); 307 vmmR3InitRegisterStats(pVM); 308 vmmInitFormatTypes(); 309 310 return VINF_SUCCESS; 311 } 302 312 } 303 313 } … … 424 434 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_MAP_CHUNK calls."); 425 435 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES calls."); 426 STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VMM_LOGGER_FLUSH calls.");427 436 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_ERROR calls."); 428 437 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_RUNTIME_ERROR calls."); … … 684 693 685 694 vmmTermFormatTypes(); 695 696 /* 
697 * Wait for the log flusher thread to complete. 698 */ 699 if (pVM->vmm.s.hLogFlusherThread != NIL_RTTHREAD) 700 { 701 int rc2 = RTThreadWait(pVM->vmm.s.hLogFlusherThread, RT_MS_30SEC, NULL); 702 AssertLogRelRC(rc2); 703 if (RT_SUCCESS(rc2)) 704 pVM->vmm.s.hLogFlusherThread = NIL_RTTHREAD; 705 } 706 686 707 return rc; 687 708 } … … 780 801 781 802 return RT_SUCCESS(rcDebug) ? rcRelease : rcDebug; 803 } 804 805 806 /** 807 * @callback_method_impl{FNRTTHREAD, Ring-0 log flusher thread.} 808 */ 809 static DECLCALLBACK(int) vmmR3LogFlusher(RTTHREAD hThreadSelf, void *pvUser) 810 { 811 PVM const pVM = (PVM)pvUser; 812 RT_NOREF(hThreadSelf); 813 814 /* Reset the flusher state before we start: */ 815 pVM->vmm.s.LogFlusherItem.u32 = UINT32_MAX; 816 817 /* The loggers. */ 818 PRTLOGGER const apLoggers[2] = { RTLogRelGetDefaultInstance(), RTLogGetDefaultInstance() }; 819 820 /* 821 * The work loop. 822 */ 823 for (;;) 824 { 825 /* 826 * Wait for work. 827 */ 828 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_VMMR0_LOG_FLUSHER, 0, NULL); 829 if (RT_SUCCESS(rc)) 830 { 831 /* Paranoia: Make another copy of the request, to make sure the validated data can't be changed. */ 832 VMMLOGFLUSHERENTRY Item; 833 Item.u32 = pVM->vmm.s.LogFlusherItem.u32; 834 if ( Item.s.idCpu < pVM->cCpus 835 && Item.s.idxLogger <= RT_ELEMENTS(apLoggers) 836 && Item.s.idxBuffer <= 1) 837 { 838 /* 839 * Verify the request. 840 */ 841 PVMCPU const pVCpu = pVM->apCpusR3[Item.s.idCpu]; 842 PVMMR3CPULOGGER const pShared = Item.s.idxLogger == 1 ? &pVCpu->vmm.s.Logger : &pVCpu->vmm.s.RelLogger; 843 uint32_t const cbToFlush = pShared->AuxDesc.offBuf; 844 if (cbToFlush > 0) 845 { 846 if (cbToFlush <= pShared->cbBuf) 847 { 848 char * const pchBufR3 = pShared->pchBufR3; 849 if (pchBufR3) 850 { 851 /* 852 * Do the flushing. 
853 */ 854 LogAlways(("*FLUSH* idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x fFlushed=%RTbool cbDropped=%#x\n", 855 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush, 856 pShared->AuxDesc.fFlushedIndicator, pShared->cbDropped)); 857 PRTLOGGER const pLogger = apLoggers[Item.s.idxLogger]; 858 if (pLogger) 859 RTLogBulkWrite(pLogger, pchBufR3, cbToFlush); 860 LogAlways(("*FLUSH DONE*\n")); 861 } 862 else 863 Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! No ring-3 buffer pointer!\n", 864 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush)); 865 } 866 else 867 Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! Exceeds %#x bytes buffer size!\n", 868 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush, pShared->cbBuf)); 869 } 870 else 871 Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! Zero bytes to flush!\n", 872 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush)); 873 874 /* 875 * Mark the descriptor as flushed and set the request flag for same. 876 */ 877 pShared->AuxDesc.fFlushedIndicator = true; 878 } 879 else 880 { 881 Assert(Item.s.idCpu == UINT16_MAX); 882 Assert(Item.s.idxLogger == UINT8_MAX); 883 Assert(Item.s.idxBuffer == UINT8_MAX); 884 } 885 } 886 /* 887 * Interrupted can happen, just ignore it. 888 */ 889 else if (rc == VERR_INTERRUPTED) 890 { /* ignore */ } 891 /* 892 * The ring-0 termination code will set the shutdown flag and wake us 893 * up, and we should return with object destroyed. In case there is 894 * some kind of race, we might also get semaphore destroyed. 895 */ 896 else if ( rc == VERR_OBJECT_DESTROYED 897 || rc == VERR_SEM_DESTROYED 898 || rc == VERR_INVALID_HANDLE) 899 { 900 LogRel(("vmmR3LogFlusher: Terminating (%Rrc)\n", rc)); 901 return VINF_SUCCESS; 902 } 903 /* 904 * There shouldn't be any other errors... 
905 */ 906 else 907 { 908 LogRelMax(64, ("vmmR3LogFlusher: VMMR0_DO_VMMR0_LOG_FLUSHER -> %Rrc\n", rc)); 909 AssertRC(rc); 910 RTThreadSleep(1); 911 } 912 } 782 913 } 783 914 … … 2387 2518 2388 2519 /* 2389 * This is a noop. We just take this route to avoid unnecessary2390 * tests in the loops.2391 */2392 case VMMCALLRING3_VMM_LOGGER_FLUSH:2393 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;2394 LogAlways(("*FLUSH*\n"));2395 break;2396 2397 /*2398 2520 * Set the VM error message. 2399 2521 */ -
trunk/src/VBox/VMM/include/VMMInternal.h
r90862 r90945 26 26 #include <VBox/vmm/stam.h> 27 27 #include <VBox/vmm/vmm.h> 28 #include <VBox/param.h> 28 29 #include <VBox/log.h> 29 30 #include <iprt/critsect.h> … … 81 82 /** Flag indicating whether we've registered the instance already. */ 82 83 bool fRegistered; 83 bool afPadding[7]; 84 /** Set by the logger thread to indicate that buffer has been flushed. */ 85 bool volatile fFlushDone; 86 /** Set while we're inside vmmR0LoggerFlushCommon to prevent recursion. */ 87 bool fFlushing; 88 bool afPadding[5]; 89 /** The event semaphore the EMT waits on while the buffer is being flushed. */ 90 RTSEMEVENT hEventFlushWait; 84 91 } VMMR0PERVCPULOGGER; 85 92 /** Pointer to the R0 logger data (ring-0 only). */ … … 98 105 /** The buffer size. */ 99 106 uint32_t cbBuf; 100 uint32_t uReserved; 107 /** Number of bytes dropped because the flush context didn't allow waiting. */ 108 uint32_t cbDropped; 101 109 } VMMR3CPULOGGER; 102 110 /** Pointer to r0 logger data shared with ring-3. */ … … 195 203 /** Pointer to a ring-0 jump buffer. */ 196 204 typedef VMMR0JMPBUF *PVMMR0JMPBUF; 205 206 207 /** 208 * Log flusher job. 209 * 210 * There is a ring buffer of these in ring-0 (VMMR0PERVM::aLogFlushRing) and a 211 * copy of the current one in the shared VM structure (VMM::LogFlusherItem). 212 */ 213 typedef union VMMLOGFLUSHERENTRY 214 { 215 struct 216 { 217 /** The virtual CPU ID. */ 218 uint32_t idCpu : 16; 219 /** The logger: 0 for release, 1 for debug. */ 220 uint32_t idxLogger : 8; 221 /** The buffer to be flushed. */ 222 uint32_t idxBuffer : 7; 223 /** Set by the flusher thread once it fetched the entry and started 224 * processing it. */ 225 uint32_t fProcessing : 1; 226 } s; 227 uint32_t u32; 228 } VMMLOGFLUSHERENTRY; 197 229 198 230 … … 286 318 char szRing0AssertMsg2[256]; 287 319 320 /** @name Logging 321 * @{ */ 288 322 /** Used when setting up ring-0 logger. */ 289 323 uint64_t nsProgramStart; 324 /** Log flusher thread. 
*/ 325 RTTHREAD hLogFlusherThread; 326 /** Copy of the current work log flusher work item. */ 327 VMMLOGFLUSHERENTRY volatile LogFlusherItem; 328 /** @} */ 290 329 291 330 /** Number of VMMR0_DO_HM_RUN or VMMR0_DO_NEM_RUN calls. */ … … 349 388 STAMCOUNTER StatRZCallPDMCritSectEnter; 350 389 STAMCOUNTER StatRZCallPDMLock; 351 STAMCOUNTER StatRZCallLogFlush;352 390 STAMCOUNTER StatRZCallPGMPoolGrow; 353 391 STAMCOUNTER StatRZCallPGMMapChunk; … … 464 502 typedef struct VMMR0PERVCPU 465 503 { 466 /** Set if we've entered HM context. */467 bool volatile fInHmContext;504 /** The EMT hash table index. */ 505 uint16_t idxEmtHash; 468 506 /** Flag indicating whether we've disabled flushing (world switch) or not. */ 469 507 bool fLogFlushingDisabled; 470 /** The EMT hash table index. */ 471 uint16_t idxEmtHash; 508 bool afPadding1[5]; 472 509 /** Pointer to the VMMR0EntryFast preemption state structure. 473 510 * This is used to temporarily restore preemption before blocking. */ … … 515 552 typedef struct VMMR0PERVM 516 553 { 554 /** Set if vmmR0InitVM has been called. */ 555 bool fCalledInitVm; 556 bool afPadding1[7]; 557 558 /** @name Logging 559 * @{ */ 517 560 /** Logger (debug) buffer allocation. 518 561 * This covers all CPUs. */ 519 RTR0MEMOBJ hMemObjLogger;562 RTR0MEMOBJ hMemObjLogger; 520 563 /** The ring-3 mapping object for hMemObjLogger. */ 521 RTR0MEMOBJ hMapObjLogger;564 RTR0MEMOBJ hMapObjLogger; 522 565 523 566 /** Release logger buffer allocation. 524 567 * This covers all CPUs. */ 525 RTR0MEMOBJ hMemObjReleaseLogger;568 RTR0MEMOBJ hMemObjReleaseLogger; 526 569 /** The ring-3 mapping object for hMemObjReleaseLogger. */ 527 RTR0MEMOBJ hMapObjReleaseLogger; 528 529 /** Set if vmmR0InitVM has been called. */ 530 bool fCalledInitVm; 570 RTR0MEMOBJ hMapObjReleaseLogger; 571 572 struct 573 { 574 /** Spinlock protecting the logger ring buffer and associated variables. 
*/ 575 R0PTRTYPE(RTSPINLOCK) hSpinlock; 576 /** The log flusher thread handle to make sure there is only one. */ 577 RTNATIVETHREAD hThread; 578 /** The handle to the event semaphore the log flusher waits on. */ 579 RTSEMEVENT hEvent; 580 /** The index of the log flusher queue head (flusher thread side). */ 581 uint32_t volatile idxRingHead; 582 /** The index of the log flusher queue tail (EMT side). */ 583 uint32_t volatile idxRingTail; 584 /** Set if the log flusher thread is waiting for work and needs poking. */ 585 bool volatile fThreadWaiting; 586 /** Set when the log flusher thread should shut down. */ 587 bool volatile fThreadShutdown; 588 /** Indicates that the log flusher thread is running. */ 589 bool volatile fThreadRunning; 590 bool afPadding2[5]; 591 /** Logger ring buffer. 592 * This is for communicating with the log flusher thread. */ 593 VMMLOGFLUSHERENTRY aRing[VMM_MAX_CPU_COUNT * 2 /*loggers*/ * 1 /*buffer*/ + 16 /*fudge*/]; 594 } LogFlusher; 595 /** @} */ 531 596 } VMMR0PERVM; 532 597
Note:
See TracChangeset
for help on using the changeset viewer.