Changeset 92408 in vbox for trunk/src/VBox/VMM/VMMR3
Timestamp: Nov 12, 2021 9:49:06 PM (3 years ago)
Location: trunk/src/VBox/VMM/VMMR3
Files: 2 edited
trunk/src/VBox/VMM/VMMR3/VMM.cpp
(VMM.cpp: changed from r92392 to r92408)

Internal functions: the forward declaration of the retired per-EMT stack allocator is dropped, leaving vmmR3InitRegisterStats() and vmmR3Save() as before:

    -static int vmmR3InitStacks(PVM pVM);

VMMR3Init: the "Init various sub-components" step that called vmmR3InitStacks(pVM) is removed. The optional VBOX_WITH_NMI host-APIC reservation, the log flusher thread creation and the debug info / statistics registration used to be nested one level deeper inside that call's success check; they now read:

    #ifdef VBOX_WITH_NMI
        /*
         * Allocate mapping for the host APIC.
         */
        rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
        AssertRC(rc);
    #endif
        if (RT_SUCCESS(rc))
        {
            /*
             * Start the log flusher thread.
             */
            rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/,
                                RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk");
            if (RT_SUCCESS(rc))
            {

                /*
                 * Debug info and statistics.
                 */
                DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
                vmmR3InitRegisterStats(pVM);
                vmmInitFormatTypes();

                return VINF_SUCCESS;
            }
        }
        /** @todo Need failure cleanup? */

        return rc;
    }
The whole vmmR3InitStacks() helper, which allocated the per-EMT ring-0 stacks and hooked them up to the call-ring-3 jump buffer, is deleted:

    -/**
    - * Allocate & setup the VMM RC stack(s) (for EMTs).
    - *
    - * The stacks are also used for long jumps in Ring-0.
    - *
    - * @returns VBox status code.
    - * @param   pVM     The cross context VM structure.
    - *
    - * @remarks The optional guard page gets it protection setup up during R3 init
    - *          completion because of init order issues.
    - */
    -static int vmmR3InitStacks(PVM pVM)
    -{
    -    int rc = VINF_SUCCESS;
    -#ifdef VMM_R0_SWITCH_STACK
    -    uint32_t fFlags = MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
    -#else
    -    uint32_t fFlags = 0;
    -#endif
    -
    -    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    -    {
    -        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
    -
    -#ifdef VBOX_STRICT_VMM_STACK
    -        rc = MMR3HyperAllocOnceNoRelEx(pVM, PAGE_SIZE + VMM_STACK_SIZE + PAGE_SIZE,
    -#else
    -        rc = MMR3HyperAllocOnceNoRelEx(pVM, VMM_STACK_SIZE,
    -#endif
    -                                       PAGE_SIZE, MM_TAG_VMM, fFlags, (void **)&pVCpu->vmm.s.pbEMTStackR3);
    -        if (RT_SUCCESS(rc))
    -        {
    -#ifdef VBOX_STRICT_VMM_STACK
    -            pVCpu->vmm.s.pbEMTStackR3 += PAGE_SIZE;
    -#endif
    -            pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
    -        }
    -    }
    -
    -    return rc;
    -}

vmmR3InitRegisterStats: the VBOX_WITH_STATISTICS block registering the per-CPU /VMM/Stack/CPU%u/Max, /Avg and /Uses counters (fed from CallRing3JmpBufR0.cbUsedMax, cbUsedAvg and cUsedTotal) is removed; the per-CPU registration loop that follows it stays.

vmmR3ReadR0Stack: the reader no longer computes an offset into pbEMTStackR3 (with a separate VMM_R0_SWITCH_STACK formula) and no longer fails with VERR_INVALID_POINTER when the range falls outside VMM_STACK_SIZE. It now works on the abAssertStack snapshot captured at assertion time, copies whatever part of the request is valid and zero-fills the rest:

        /*
         * Hopefully we've got all the requested bits.  If not supply what we
         * can and zero the remaining stuff.
         */
        RTHCUINTPTR off = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
        if (off < pVCpu->vmm.s.AssertJmpBuf.cbStackValid)
        {
            size_t const cbValid = pVCpu->vmm.s.AssertJmpBuf.cbStackValid - off;
            if (cbRead <= cbValid)
            {
                memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbRead);
                return VINF_SUCCESS;
            }

            memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbValid);
            RT_BZERO((uint8_t *)pvBuf + cbValid, cbRead - cbValid);
        }
        else
            RT_BZERO(pvBuf, cbRead);

The follow-up block that patches the setjmp return RIP/EIP into the buffer is kept, with every CallRing3JmpBufR0 reference renamed to AssertJmpBuf; its "exact match" special case (rc = VINF_SUCCESS when cbSrc == cbRead) is commented out, and the function now always returns VINF_SUCCESS.
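The new read path is essentially a clamp-and-copy over the saved stack snapshot. Below is a minimal, self-contained sketch of that pattern in plain C; STACKSNAPSHOT, its members and readR0StackSnapshot() are made-up stand-ins for the VMM structures, not the actual code:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical stand-in for the per-CPU assertion jump buffer + stack copy. */
    typedef struct STACKSNAPSHOT
    {
        uintptr_t uUnwindSp;       /* ring-0 stack pointer the copy starts at */
        size_t    cbValid;         /* number of valid bytes in the copy */
        uint8_t   abCopy[16384];   /* the saved stack bytes */
    } STACKSNAPSHOT;

    /*
     * Read cbRead bytes "at" ring-0 address uR0Addr out of the snapshot.  Bytes
     * outside the valid part are zero-filled instead of failing the whole
     * request, mirroring the r92408 vmmR3ReadR0Stack behaviour.
     */
    static void readR0StackSnapshot(STACKSNAPSHOT const *pSnap, uintptr_t uR0Addr, void *pvBuf, size_t cbRead)
    {
        uintptr_t const off = uR0Addr - pSnap->uUnwindSp;  /* wraps to a huge value below uUnwindSp */
        if (off < pSnap->cbValid)
        {
            size_t const cbValid = pSnap->cbValid - off;
            if (cbRead <= cbValid)
            {
                memcpy(pvBuf, &pSnap->abCopy[off], cbRead);
                return;
            }
            memcpy(pvBuf, &pSnap->abCopy[off], cbValid);
            memset((uint8_t *)pvBuf + cbValid, 0, cbRead - cbValid);
        }
        else
            memset(pvBuf, 0, cbRead);
    }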
The ring-0 unwind state used for the assertion stack walk is now primed from the jump buffer up front, with a note that this alone would suffice if proper unwind info were available (win64 only):

        pState->u.x86.auRegs[X86_GREG_xBP] = pVCpu->vmm.s.AssertJmpBuf.UnwindBp;
        pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
        pState->uPc                        = pVCpu->vmm.s.AssertJmpBuf.UnwindPc;

Locating the resume point on the stack is simplified: the VMM_R0_SWITCH_STACK variant and the RESUME_MAGIC (0x7eadf00d) strict checks are gone, the offset simply starts at zero, and the comments now say the code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-amd64.asm / VMMR0JmpA-x86.asm rather than the old ".resume stuff". The saved registers are read from abAssertStack instead of pbEMTStackR3, in the same order as before. On AMD64 that is: an 0xa0 byte skip for XMM6 thru XMM15 on Windows, then RFLAGS, RBX (plus RSI and RDI on Windows), R12, R13, R14, R15, RBP and the return RIP, advancing off by 8 each time; on x86 it is EFLAGS, EBX, ESI, EDI, EBP and the return EIP in 4 byte steps. In both cases the final stack pointer is now taken straight from the jump buffer:

    -    pState->uPc = *(uint64_t const *)&pVCpu->vmm.s.pbEMTStackR3[off];
    -    off += 8;
    +    pState->uPc = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
    +    pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp;

The old trailing block ("This is all we really need here, though the above helps if the assembly doesn't contain unwind info") that set xBP and xSP from CallRing3JmpBufR0.SavedEbp and SpResume is deleted; its role is taken over by the new assignments at the top of the function.
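For orientation, the frame being walked on AMD64 can be pictured as a plain struct. This is only an illustration of the ordering listed above (hypothetical names; SysV layout without the Windows XMM area or RSI/RDI slots); the real code indexes abAssertStack with a running byte offset instead:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative layout of the longjmp register frame at the start of the
       stack snapshot on AMD64 SysV hosts (Windows adds an 0xa0 byte XMM6-XMM15
       save area in front and RSI/RDI slots after RBX). */
    typedef struct LONGJMPFRAME64
    {
        uint64_t uRFlags;    /* saved RFLAGS */
        uint64_t uRbx;       /* callee-saved GPRs */
        uint64_t uR12;
        uint64_t uR13;
        uint64_t uR14;
        uint64_t uR15;
        uint64_t uRbp;
        uint64_t uRetRip;    /* RIP read into pState->uPc above */
    } LONGJMPFRAME64;

    /* Copy the frame out of a saved stack snapshot without assuming alignment. */
    static LONGJMPFRAME64 readLongJmpFrame(uint8_t const *pbStackCopy, size_t offFrame)
    {
        LONGJMPFRAME64 Frame;
        memcpy(&Frame, pbStackCopy + offFrame, sizeof(Frame));
        return Frame;
    }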
vmmR3HandleRing0Assert no longer has to cancel an in-progress call-ring-3 operation: the resets of CallRing3JmpBufR0.fInRing3Call and of the saved eip/rip, together with the VMM_R0_SWITCH_STACK stack-marker clearing, are dropped, leaving an RT_NOREF(pVCpu) and the logging of the two ring-0 assertion messages:

    -    /*
    -     * Signal a ring 0 hypervisor assertion.
    -     * Cancel the longjmp operation that's in progress.
    -     */
    -    pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
    -#ifdef RT_ARCH_X86
    -    pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
    -#else
    -    pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
    -#endif
    -#ifdef VMM_R0_SWITCH_STACK
    -    *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker */
    -#endif
    +    RT_NOREF(pVCpu);
         LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
         LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
trunk/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
(VMMGuruMeditation.cpp: changed from r90829 to r92408)

Guru meditation classification: the retired setjmp error codes are removed from the switch, leaving VINF_EM_TRIPLE_FAULT, VERR_VMM_HYPER_CR3_MISMATCH and VERR_VMM_LONG_JMP_ERROR:

    -        case VERR_VMM_SET_JMP_ERROR:
    -        case VERR_VMM_SET_JMP_ABORTED_RESUME:
    -        case VERR_VMM_SET_JMP_STACK_OVERFLOW:

Hypervisor register and stack dump: the dump is now done only for VERR_VMM_RING0_ASSERTION (the old fInRing3Call test is gone), and it prints the new AssertJmpBuf instead of the CallRing3JmpBuf:

    -        if (   rcErr == VERR_VMM_RING0_ASSERTION /* fInRing3Call has already been cleared here. */
    -            || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
    +        if (rcErr == VERR_VMM_RING0_ASSERTION)

The SavedEsp/SavedEbp/SpResume/SpCheck, pvSavedStack/cbSavedStack/fInRing3Call and cbUsedMax/cbUsedAvg/cbUsedTotal/cUsedTotal printouts are replaced by the unwind members:

    +            pHlp->pfnPrintf(pHlp,
    +                            "UnwindSp=%RHv UnwindRetSp=%RHv UnwindBp=%RHv UnwindPc=%RHv\n",
    +                            pVCpu->vmm.s.AssertJmpBuf.UnwindSp,
    +                            pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp,
    +                            pVCpu->vmm.s.AssertJmpBuf.UnwindBp,
    +                            pVCpu->vmm.s.AssertJmpBuf.UnwindPc);
    +            pHlp->pfnPrintf(pHlp,
    +                            "UnwindRetPcValue=%RHv UnwindRetPcLocation=%RHv\n",
    +                            pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue,
    +                            pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation);

while the pfn/pvUser1/pvUser2 printout merely switches from CallRing3JmpBufR0 to AssertJmpBuf. The pointer to the resume register frame is now derived from the stack snapshot: the copy starts at UnwindSp, so the frame pointer sits UnwindBp - UnwindSp bytes into it and the VMM_R0_SWITCH_STACK / cbSavedStack arithmetic goes away:

    +            PRTHCUINTPTR const pBP = (PRTHCUINTPTR)&pVCpu->vmm.s.abAssertStack[  pVCpu->vmm.s.AssertJmpBuf.UnwindBp
    +                                                                               - pVCpu->vmm.s.AssertJmpBuf.UnwindSp];

In the frame printouts, the HC_ARCH_BITS == 32 variant now references pVCpu->vmm.s.AssertJmpBuf.SavedEbp - 8, while both 64-bit variants print pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp instead of CallRing3JmpBufR0.SavedEbp - 16.
The ring-0 stack walk is started from the unwind members as well:

    -            rc2 = DBGFR3StackWalkBeginEx(pVM->pUVM, pVCpu->idCpu, DBGFCODETYPE_RING0,
    -                                         DBGFR3AddrFromHostR0(&AddrBp, pVCpu->vmm.s.CallRing3JmpBufR0.SavedEbp),
    -                                         DBGFR3AddrFromHostR0(&AddrSp, pVCpu->vmm.s.CallRing3JmpBufR0.SpResume),
    -                                         DBGFR3AddrFromHostR0(&AddrPc, pVCpu->vmm.s.CallRing3JmpBufR0.SavedEipForUnwind),
    +            rc2 = DBGFR3StackWalkBeginEx(pVM->pUVM, pVCpu->idCpu, DBGFCODETYPE_RING0,
    +                                         DBGFR3AddrFromHostR0(&AddrBp, pVCpu->vmm.s.AssertJmpBuf.UnwindBp),
    +                                         DBGFR3AddrFromHostR0(&AddrSp, pVCpu->vmm.s.AssertJmpBuf.UnwindSp),
    +                                         DBGFR3AddrFromHostR0(&AddrPc, pVCpu->vmm.s.AssertJmpBuf.UnwindPc),
                                              RTDBGRETURNTYPE_INVALID, &pFirstFrame);

The "Symbols on the stack" scan loses its VMM_R0_SWITCH_STACK variant and sweeps the valid part of the abAssertStack snapshot instead of pbEMTStackR3:

    -#ifdef VMM_R0_SWITCH_STACK
    -            uint32_t const iLast = VMM_STACK_SIZE / sizeof(uintptr_t);
    -            uint32_t iAddr = (uint32_t)(  pVCpu->vmm.s.CallRing3JmpBufR0.SavedEsp
    -                                        - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3)) / sizeof(uintptr_t);
    -            if (iAddr > iLast)
    -                iAddr = 0;
    -#else
    -            uint32_t const iLast = RT_MIN(pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack, VMM_STACK_SIZE)
    -                                 / sizeof(uintptr_t);
    -            uint32_t iAddr = 0;
    -#endif
    +            uint32_t const          cbRawStack = RT_MIN(pVCpu->vmm.s.AssertJmpBuf.cbStackValid, sizeof(pVCpu->vmm.s.abAssertStack));
    +            uintptr_t const * const pauAddr    = (uintptr_t const *)&pVCpu->vmm.s.abAssertStack[0];
    +            uint32_t const          iEnd       = cbRawStack / sizeof(uintptr_t);
    +            uint32_t                iAddr      = 0;

The header line changes from "(iAddr=%#x, iLast=%#x)" to "(iAddr=%#x, iEnd=%#x)", the separate paAddr pointer into pbEMTStackR3 is gone, the loop becomes while (iAddr < iEnd) over pauAddr[iAddr], and the DBGFR3AsSymbolByAddrA / DBGFR3AsLineByAddrA lookups are only re-indented (offLineDisp is now initialised to zero). The raw stack hex dump also uses the snapshot:

    -                            "!! pbEMTStackR0=%RHv pbEMTStackBottomR0=%RHv VMM_STACK_SIZE=%#x\n"
    +                            "!! pbEMTStackR0=%RHv cbRawStack=%#x\n"
                                 "!! pbEmtStackR3=%p\n"
                                 "!!\n"
                                 "%.*Rhxd\n",
    -                            MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3),
    -                            MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3) + VMM_STACK_SIZE,
    -                            VMM_STACK_SIZE,
    -                            pVCpu->vmm.s.pbEMTStackR3,
    -                            VMM_STACK_SIZE, pVCpu->vmm.s.pbEMTStackR3);
    +                            pVCpu->vmm.s.AssertJmpBuf.UnwindSp, cbRawStack,
    +                            &pVCpu->vmm.s.abAssertStack[0],
    +                            cbRawStack, &pVCpu->vmm.s.abAssertStack[0]);
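The scan is just a word-by-word sweep over the snapshot, trying to resolve each pointer-sized value as a ring-0 symbol. A compact, self-contained sketch of that idea in plain C, with a made-up resolver callback standing in for the DBGFR3AsSymbolByAddrA lookup (all names here are illustrative, not VMM APIs):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical resolver: returns nonzero and fills pszName/poffDisp when
       uAddr falls inside a known ring-0 symbol. */
    typedef int FNRESOLVESYM(uintptr_t uAddr, char *pszName, size_t cchName, ptrdiff_t *poffDisp);

    /* Sweep the saved stack copy and print every word that resolves to a symbol,
       mirroring the "Addresses on the stack" loop after r92408. */
    static void scanStackForSymbols(uint8_t const *pbStack, size_t cbValid, FNRESOLVESYM *pfnResolve)
    {
        uintptr_t const *pauAddr = (uintptr_t const *)pbStack;
        size_t const     iEnd    = cbValid / sizeof(uintptr_t);
        for (size_t iAddr = 0; iAddr < iEnd; iAddr++)
        {
            uintptr_t const uAddr = pauAddr[iAddr];
            if (uAddr > 4096)   /* skip NULL-ish values, like the X86_PAGE_SIZE check */
            {
                char      szSym[128];
                ptrdiff_t offDisp = 0;
                if (pfnResolve(uAddr, szSym, sizeof(szSym), &offDisp))
                    printf("%#zx: %#zx  %s + %#tx\n",
                           iAddr * sizeof(uintptr_t), (size_t)uAddr, szSym, offDisp);
            }
        }
    }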