Changeset 90970 in vbox for trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
- Timestamp: Aug 27, 2021 10:14:33 PM (3 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r90958 r90970 2988 2988 pGVM->vmmr0.s.LogFlusher.idxRingHead = (idxHead + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 2989 2989 2990 /* Do the accounting and waking up. */2990 /* Validate content. */ 2991 2991 if ( idCpu < pGVM->cCpus 2992 2992 && idxLogger < VMMLOGGER_IDX_MAX … … 2997 2997 PVMMR3CPULOGGER pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger]; 2998 2998 2999 pR0Log->fFlushDone = true; 3000 2999 /* 3000 * Accounting. 3001 */ 3001 3002 uint32_t cFlushing = pR0Log->cFlushing - 1; 3002 3003 if (RT_LIKELY(cFlushing < VMMLOGGER_BUFFER_COUNT)) … … 3007 3008 ASMAtomicWriteU32(&pShared->cFlushing, cFlushing); 3008 3009 3009 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3010 3011 int rc = RTSemEventSignal(pR0Log->hEventFlushWait); 3012 if (RT_FAILURE(rc)) 3013 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n", 3014 idxHead, idCpu, idxLogger, idxBuffer, rc)); 3010 /* 3011 * Wake up the EMT if it's waiting. 3012 */ 3013 if (!pR0Log->fEmtWaiting) 3014 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3015 else 3016 { 3017 pR0Log->fEmtWaiting = false; 3018 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3019 3020 int rc = RTSemEventSignal(pR0Log->hEventFlushWait); 3021 if (RT_FAILURE(rc)) 3022 LogRelMax(64, ("vmmR0LogFlusher: RTSemEventSignal failed ACKing entry #%u (%u/%u/%u): %Rrc!\n", 3023 idxHead, idCpu, idxLogger, idxBuffer, rc)); 3024 } 3015 3025 } 3016 3026 else … … 3089 3099 3090 3100 3101 static bool vmmR0LoggerFlushInner(PGVM pGVM, PGVMCPU pGVCpu, uint32_t idxLogger, size_t idxBuffer, uint32_t cbToFlush) 3102 { 3103 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger]; 3104 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger]; 3105 3106 /* 3107 * Figure out what we need to do and whether we can. 
3108 */ 3109 enum { kJustSignal, kPrepAndSignal, kPrepSignalAndWait } enmAction; 3110 #if VMMLOGGER_BUFFER_COUNT >= 2 3111 if (pR0Log->cFlushing < VMMLOGGER_BUFFER_COUNT - 1) 3112 { 3113 if (RTSemEventIsSignalSafe()) 3114 enmAction = kJustSignal; 3115 else if (VMMRZCallRing3IsEnabled(pGVCpu)) 3116 enmAction = kPrepAndSignal; 3117 else 3118 { 3119 /** @todo This is a bit simplistic. We could introduce a FF to signal the 3120 * thread or similar. */ 3121 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock); 3122 # if defined(RT_OS_LINUX) 3123 SUP_DPRINTF(("vmmR0LoggerFlush: Signalling not safe and EMT blocking disabled! (%u bytes)\n", cbToFlush)); 3124 # endif 3125 pShared->cbDropped += cbToFlush; 3126 return true; 3127 } 3128 } 3129 else 3130 #endif 3131 if (VMMRZCallRing3IsEnabled(pGVCpu)) 3132 enmAction = kPrepSignalAndWait; 3133 else 3134 { 3135 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock); 3136 # if defined(RT_OS_LINUX) 3137 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", cbToFlush)); 3138 # endif 3139 pShared->cbDropped += cbToFlush; 3140 return true; 3141 } 3142 3143 /* 3144 * Prepare for blocking if necessary. 3145 */ 3146 VMMR0EMTBLOCKCTX Ctx; 3147 if (enmAction != kJustSignal) 3148 { 3149 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushCommon", pR0Log->hEventFlushWait, &Ctx); 3150 if (RT_SUCCESS(rc)) 3151 { /* likely */ } 3152 else 3153 { 3154 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock); 3155 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc)); 3156 return false; 3157 } 3158 } 3159 3160 /* 3161 * Queue the flush job. 
3162 */ 3163 bool fFlushedBuffer; 3164 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3165 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning) 3166 { 3167 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3168 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3169 uint32_t const idxNewTail = (idxTail + 1) % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3170 if (idxNewTail != idxHead) 3171 { 3172 /* Queue it. */ 3173 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu; 3174 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger; 3175 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer; 3176 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0; 3177 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail; 3178 3179 /* Update the number of buffers currently being flushed. */ 3180 uint32_t cFlushing = pR0Log->cFlushing; 3181 cFlushing = RT_MIN(cFlushing + 1, VMMLOGGER_BUFFER_COUNT); 3182 pShared->cFlushing = pR0Log->cFlushing = cFlushing; 3183 3184 /* We must wait if all buffers are currently being flushed. */ 3185 bool const fEmtWaiting = cFlushing >= VMMLOGGER_BUFFER_COUNT && enmAction != kJustSignal /* paranoia */; 3186 pR0Log->fEmtWaiting = fEmtWaiting; 3187 3188 /* Stats. */ 3189 STAM_REL_COUNTER_INC(&pShared->StatFlushes); 3190 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes); 3191 3192 /* Signal the worker thread. */ 3193 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting) 3194 { 3195 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3196 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent); 3197 } 3198 else 3199 { 3200 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp); 3201 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3202 } 3203 3204 /* 3205 * Wait for a buffer to finish flushing. 3206 * 3207 * Note! Lazy bird is ignoring the status code here. 
The result is 3208 * that we might end up with an extra event signalling and the 3209 * next time we need to wait we won't and end up with some log 3210 * corruption. However, it's too much hassle right now for 3211 * a scenario which would most likely end the process rather 3212 * than causing log corruption. 3213 */ 3214 if (fEmtWaiting) 3215 { 3216 STAM_REL_PROFILE_START(&pShared->StatWait, a); 3217 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED, 3218 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT); 3219 STAM_REL_PROFILE_STOP(&pShared->StatWait, a); 3220 } 3221 3222 /* 3223 * We always switch buffer if we have more than one. 3224 */ 3225 AssertCompile(VMMLOGGER_BUFFER_COUNT >= 1); 3226 fFlushedBuffer = VMMLOGGER_BUFFER_COUNT == 1; 3227 } 3228 else 3229 { 3230 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3231 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n")); 3232 fFlushedBuffer = true; 3233 } 3234 } 3235 else 3236 { 3237 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3238 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", cbToFlush)); 3239 fFlushedBuffer = true; 3240 } 3241 3242 /* 3243 * Restore the HM context. 3244 */ 3245 if (enmAction != kJustSignal) 3246 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx); 3247 3248 return fFlushedBuffer; 3249 } 3250 3251 3091 3252 /** 3092 3253 * Common worker for vmmR0LogFlush and vmmR0LogRelFlush. … … 3094 3255 static bool vmmR0LoggerFlushCommon(PRTLOGGER pLogger, PRTLOGBUFFERDESC pBufDesc, uint32_t idxLogger) 3095 3256 { 3096 RT_NOREF(pBufDesc);3097 3098 3257 /* 3099 3258 * Convert the pLogger into a GVMCPU handle and 'call' back to Ring-3. 
… … 3116 3275 { 3117 3276 PVMMR0PERVCPULOGGER const pR0Log = &pGVCpu->vmmr0.s.u.aLoggers[idxLogger]; 3118 PVMMR3CPULOGGER const pShared = &pGVCpu->vmm.s.u.aLoggers[idxLogger];3119 3277 size_t const idxBuffer = pBufDesc - &pR0Log->aBufDescs[0]; 3120 if (idxBuffer < RT_ELEMENTS(pR0Log->aBufDescs))3278 if (idxBuffer < VMMLOGGER_BUFFER_COUNT) 3121 3279 { 3122 3280 /* 3123 * Can we wait on the log flusher to do the work? 3281 * Make sure we don't recurse forever here should something in the 3282 * following code trigger logging or an assertion. Do the rest in 3283 * an inner work to avoid hitting the right margin too hard. 3124 3284 */ 3125 if ( VMMRZCallRing3IsEnabled(pGVCpu))3285 if (!pR0Log->fFlushing) 3126 3286 { 3127 /* 3128 * Make sure we don't recurse forever here should something in the 3129 * following code trigger logging or an assertion. 3130 */ 3131 if (!pR0Log->fFlushing) 3132 { 3133 pR0Log->fFlushing = true; 3134 3135 /* 3136 * Prepare to block waiting for the flusher thread. 3137 */ 3138 VMMR0EMTBLOCKCTX Ctx; 3139 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, "vmmR0LoggerFlushCommon", 3140 pR0Log->hEventFlushWait, &Ctx); 3141 if (RT_SUCCESS(rc)) 3142 { 3143 /* 3144 * Queue the flush job. 
3145 */ 3146 RTSpinlockAcquire(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3147 if (pGVM->vmmr0.s.LogFlusher.fThreadRunning) 3148 { 3149 uint32_t const idxHead = pGVM->vmmr0.s.LogFlusher.idxRingHead 3150 % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3151 uint32_t const idxTail = pGVM->vmmr0.s.LogFlusher.idxRingTail 3152 % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3153 uint32_t const idxNewTail = (idxTail + 1) 3154 % RT_ELEMENTS(pGVM->vmmr0.s.LogFlusher.aRing); 3155 if (idxNewTail != idxHead) 3156 { 3157 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idCpu = pGVCpu->idCpu; 3158 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxLogger = idxLogger; 3159 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.idxBuffer = (uint32_t)idxBuffer; 3160 pGVM->vmmr0.s.LogFlusher.aRing[idxTail].s.fProcessing = 0; 3161 pGVM->vmmr0.s.LogFlusher.idxRingTail = idxNewTail; 3162 3163 pShared->cFlushing = pR0Log->cFlushing = RT_MIN(pR0Log->cFlushing + 1, 3164 VMMLOGGER_BUFFER_COUNT); 3165 3166 STAM_REL_COUNTER_INC(&pShared->StatFlushes); 3167 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherFlushes); 3168 3169 if (pGVM->vmmr0.s.LogFlusher.fThreadWaiting) 3170 { 3171 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3172 RTSemEventSignal(pGVM->vmmr0.s.LogFlusher.hEvent); 3173 } 3174 else 3175 { 3176 STAM_REL_COUNTER_INC(&pGVM->vmm.s.StatLogFlusherNoWakeUp); 3177 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3178 } 3179 3180 /* 3181 * Wait for it to complete. 
3182 */ 3183 STAM_REL_PROFILE_START(&pShared->StatWait, a); 3184 VMMR0EmtWaitEventInner(pGVCpu, VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED, 3185 pR0Log->hEventFlushWait, RT_INDEFINITE_WAIT); 3186 STAM_REL_PROFILE_STOP(&pShared->StatWait, a); 3187 } 3188 else 3189 { 3190 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3191 SUP_DPRINTF(("vmmR0LoggerFlush: ring buffer is full!\n")); 3192 } 3193 } 3194 else 3195 { 3196 RTSpinlockRelease(pGVM->vmmr0.s.LogFlusher.hSpinlock); 3197 SUP_DPRINTF(("vmmR0LoggerFlush: flusher not active - dropping %u bytes\n", 3198 pBufDesc->offBuf)); 3199 } 3200 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx); 3201 } 3202 else 3203 { 3204 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock); 3205 SUP_DPRINTF(("vmmR0LoggerFlush: VMMR0EmtPrepareToBlock failed! rc=%d\n", rc)); 3206 } 3207 pR0Log->fFlushing = false; 3208 } 3209 else 3210 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n")); 3287 pR0Log->fFlushing = true; 3288 bool fFlushed = vmmR0LoggerFlushInner(pGVM, pGVCpu, idxLogger, idxBuffer, pBufDesc->offBuf); 3289 pR0Log->fFlushing = false; 3290 return fFlushed; 3211 3291 } 3212 else 3213 { 3214 STAM_REL_COUNTER_INC(&pShared->StatCannotBlock); 3215 #if defined(RT_OS_LINUX) 3216 SUP_DPRINTF(("vmmR0LoggerFlush: EMT blocking disabled! (%u bytes)\n", pBufDesc->offBuf)); 3217 #endif 3218 pShared->cbDropped += pBufDesc->offBuf; 3219 } 3292 3293 SUP_DPRINTF(("vmmR0LoggerFlush: Recursive flushing!\n")); 3220 3294 } 3221 3295 else
Note: See TracChangeset for help on using the changeset viewer.