VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFStack.cpp

Last change on this file was 106383, checked in by vboxsync, 7 weeks ago

VMM/DBGFStack: Some early stack walking code, bugref:10393

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 58.2 KB
Line 
1/* $Id: DBGFStack.cpp 106383 2024-10-16 13:54:40Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Call Stack Analyser.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DBGF
33#include <VBox/vmm/dbgf.h>
34#include <VBox/vmm/selm.h>
35#include <VBox/vmm/mm.h>
36#include "DBGFInternal.h"
37#include <VBox/vmm/vm.h>
38#include <VBox/vmm/uvm.h>
39#include <VBox/err.h>
40#include <VBox/log.h>
41#include <iprt/param.h>
42#include <iprt/assert.h>
43#include <iprt/alloca.h>
44#include <iprt/mem.h>
45#include <iprt/string.h>
46#include <iprt/formats/pecoff.h>
47
48
49/*********************************************************************************************************************************
50* Structures and Typedefs *
51*********************************************************************************************************************************/
52static DECLCALLBACK(int) dbgfR3StackReadCallback(PRTDBGUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst);
53
54/**
55 * Unwind context.
56 *
57 * @note Using a constructor and destructor here for simple+safe cleanup.
58 */
59typedef struct DBGFUNWINDCTX
60{
61 PUVM m_pUVM;
62 VMCPUID m_idCpu;
63 RTDBGAS m_hAs;
64 PCCPUMCTX m_pInitialCtx;
65 bool m_fIsHostRing0;
66 uint64_t m_uOsScratch; /**< For passing to DBGFOSREG::pfnStackUnwindAssist. */
67
68 RTDBGMOD m_hCached;
69 RTUINTPTR m_uCachedMapping;
70 RTUINTPTR m_cbCachedMapping;
71 RTDBGSEGIDX m_idxCachedSegMapping;
72
73 RTDBGUNWINDSTATE m_State;
74
75 DBGFUNWINDCTX(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pInitialCtx, RTDBGAS hAs)
76 {
77 m_State.u32Magic = RTDBGUNWINDSTATE_MAGIC;
78#if defined(VBOX_VMM_TARGET_ARMV8)
79 m_State.enmArch = RTLDRARCH_ARM64;
80#else
81 m_State.enmArch = RTLDRARCH_AMD64;
82#endif
83 m_State.pfnReadStack = dbgfR3StackReadCallback;
84 m_State.pvUser = this;
85 RT_ZERO(m_State.u);
86 if (pInitialCtx)
87 {
88#if defined(VBOX_VMM_TARGET_ARMV8)
89 AssertCompile(RT_ELEMENTS(m_State.u.armv8.auGprs) == RT_ELEMENTS(pInitialCtx->aGRegs));
90
91 m_State.uPc = pInitialCtx->Pc.u64;
92 m_State.u.armv8.uSpEl0 = pInitialCtx->aSpReg[0].u64;
93 m_State.u.armv8.uSpEl1 = pInitialCtx->aSpReg[1].u64;
94
95 for (uint32_t i = 0; i < RT_ELEMENTS(m_State.u.armv8.auGprs); i++)
96 m_State.u.armv8.auGprs[i] = pInitialCtx->aGRegs[i].x;
97#else
98 m_State.u.x86.auRegs[X86_GREG_xAX] = pInitialCtx->rax;
99 m_State.u.x86.auRegs[X86_GREG_xCX] = pInitialCtx->rcx;
100 m_State.u.x86.auRegs[X86_GREG_xDX] = pInitialCtx->rdx;
101 m_State.u.x86.auRegs[X86_GREG_xBX] = pInitialCtx->rbx;
102 m_State.u.x86.auRegs[X86_GREG_xSP] = pInitialCtx->rsp;
103 m_State.u.x86.auRegs[X86_GREG_xBP] = pInitialCtx->rbp;
104 m_State.u.x86.auRegs[X86_GREG_xSI] = pInitialCtx->rsi;
105 m_State.u.x86.auRegs[X86_GREG_xDI] = pInitialCtx->rdi;
106 m_State.u.x86.auRegs[X86_GREG_x8 ] = pInitialCtx->r8;
107 m_State.u.x86.auRegs[X86_GREG_x9 ] = pInitialCtx->r9;
108 m_State.u.x86.auRegs[X86_GREG_x10] = pInitialCtx->r10;
109 m_State.u.x86.auRegs[X86_GREG_x11] = pInitialCtx->r11;
110 m_State.u.x86.auRegs[X86_GREG_x12] = pInitialCtx->r12;
111 m_State.u.x86.auRegs[X86_GREG_x13] = pInitialCtx->r13;
112 m_State.u.x86.auRegs[X86_GREG_x14] = pInitialCtx->r14;
113 m_State.u.x86.auRegs[X86_GREG_x15] = pInitialCtx->r15;
114 m_State.uPc = pInitialCtx->rip;
115 m_State.u.x86.uRFlags = pInitialCtx->rflags.u;
116 m_State.u.x86.auSegs[X86_SREG_ES] = pInitialCtx->es.Sel;
117 m_State.u.x86.auSegs[X86_SREG_CS] = pInitialCtx->cs.Sel;
118 m_State.u.x86.auSegs[X86_SREG_SS] = pInitialCtx->ss.Sel;
119 m_State.u.x86.auSegs[X86_SREG_DS] = pInitialCtx->ds.Sel;
120 m_State.u.x86.auSegs[X86_SREG_GS] = pInitialCtx->gs.Sel;
121 m_State.u.x86.auSegs[X86_SREG_FS] = pInitialCtx->fs.Sel;
122 m_State.u.x86.fRealOrV86 = CPUMIsGuestInRealOrV86ModeEx(pInitialCtx);
123#endif
124 }
125 else if (hAs == DBGF_AS_R0)
126 VMMR3InitR0StackUnwindState(pUVM, idCpu, &m_State);
127
128 m_pUVM = pUVM;
129 m_idCpu = idCpu;
130 m_hAs = DBGFR3AsResolveAndRetain(pUVM, hAs);
131 m_pInitialCtx = pInitialCtx;
132 m_fIsHostRing0 = hAs == DBGF_AS_R0;
133 m_uOsScratch = 0;
134
135 m_hCached = NIL_RTDBGMOD;
136 m_uCachedMapping = 0;
137 m_cbCachedMapping = 0;
138 m_idxCachedSegMapping = NIL_RTDBGSEGIDX;
139 }
140
141 ~DBGFUNWINDCTX();
142
143} DBGFUNWINDCTX;
144/** Pointer to unwind context. */
145typedef DBGFUNWINDCTX *PDBGFUNWINDCTX;
146
147
148static void dbgfR3UnwindCtxFlushCache(PDBGFUNWINDCTX pUnwindCtx)
149{
150 if (pUnwindCtx->m_hCached != NIL_RTDBGMOD)
151 {
152 RTDbgModRelease(pUnwindCtx->m_hCached);
153 pUnwindCtx->m_hCached = NIL_RTDBGMOD;
154 }
155 pUnwindCtx->m_cbCachedMapping = 0;
156 pUnwindCtx->m_idxCachedSegMapping = NIL_RTDBGSEGIDX;
157}
158
159
160DBGFUNWINDCTX::~DBGFUNWINDCTX()
161{
162 dbgfR3UnwindCtxFlushCache(this);
163 if (m_hAs != NIL_RTDBGAS)
164 {
165 RTDbgAsRelease(m_hAs);
166 m_hAs = NIL_RTDBGAS;
167 }
168}
169
170
171/**
172 * @interface_method_impl{RTDBGUNWINDSTATE,pfnReadStack}
173 */
174static DECLCALLBACK(int) dbgfR3StackReadCallback(PRTDBGUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst)
175{
176#if defined(VBOX_VMM_TARGET_ARMV8)
177 Assert(pThis->enmArch == RTLDRARCH_ARM64);
178#else
179 Assert( pThis->enmArch == RTLDRARCH_AMD64
180 || pThis->enmArch == RTLDRARCH_X86_32);
181#endif
182
183 PDBGFUNWINDCTX pUnwindCtx = (PDBGFUNWINDCTX)pThis->pvUser;
184 DBGFADDRESS SrcAddr;
185 int rc = VINF_SUCCESS;
186 if (pUnwindCtx->m_fIsHostRing0)
187 DBGFR3AddrFromHostR0(&SrcAddr, uSp);
188 else
189 {
190#if defined(VBOX_VMM_TARGET_ARMV8)
191 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
192#else
193 if ( pThis->enmArch == RTLDRARCH_X86_32
194 || pThis->enmArch == RTLDRARCH_X86_16)
195 {
196 if (!pThis->u.x86.fRealOrV86)
197 rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pThis->u.x86.auSegs[X86_SREG_SS], uSp);
198 else
199 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp + ((uint32_t)pThis->u.x86.auSegs[X86_SREG_SS] << 4));
200 }
201 else
202 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
203#endif
204 }
205 if (RT_SUCCESS(rc))
206 rc = DBGFR3MemRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pvDst, cbToRead);
207 if (RT_SUCCESS(rc))
208 return rc;
209 return -rc; /* Ignore read errors. */
210}
211
212
213/**
214 * Sets PC and SP.
215 *
216 * @returns true.
217 * @param pUnwindCtx The unwind context.
218 * @param pAddrPC The program counter (PC) value to set.
219 * @param pAddrStack The stack pointer (SP) value to set.
220 */
221static bool dbgfR3UnwindCtxSetPcAndSp(PDBGFUNWINDCTX pUnwindCtx, PCDBGFADDRESS pAddrPC, PCDBGFADDRESS pAddrStack)
222{
223#if defined(VBOX_VMM_TARGET_ARMV8)
224 Assert(pUnwindCtx->m_State.enmArch == RTLDRARCH_ARM64);
225
226 Assert(!DBGFADDRESS_IS_FAR(pAddrPC));
227 pUnwindCtx->m_State.uPc = pAddrPC->FlatPtr;
228 Assert(!DBGFADDRESS_IS_FAR(pAddrStack));
229 pUnwindCtx->m_State.u.armv8.uSpEl1 = pAddrStack->FlatPtr; /** @todo EL0 stack pointer. */
230#else
231 Assert( pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
232 || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32);
233
234 if (!DBGFADDRESS_IS_FAR(pAddrPC))
235 pUnwindCtx->m_State.uPc = pAddrPC->FlatPtr;
236 else
237 {
238 pUnwindCtx->m_State.uPc = pAddrPC->off;
239 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS] = pAddrPC->Sel;
240 }
241 if (!DBGFADDRESS_IS_FAR(pAddrStack))
242 pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->FlatPtr;
243 else
244 {
245 pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->off;
246 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] = pAddrStack->Sel;
247 }
248#endif
249
250 return true;
251}
252
253
254/**
255 * Tries to unwind one frame using unwind info.
256 *
257 * @returns true on success, false on failure.
258 * @param pUnwindCtx The unwind context.
259 */
260static bool dbgfR3UnwindCtxDoOneFrame(PDBGFUNWINDCTX pUnwindCtx)
261{
262 /*
263 * Need to load it into the cache?
264 */
265 RTUINTPTR offCache = pUnwindCtx->m_State.uPc - pUnwindCtx->m_uCachedMapping;
266 if (offCache >= pUnwindCtx->m_cbCachedMapping)
267 {
268 RTDBGMOD hDbgMod = NIL_RTDBGMOD;
269 RTUINTPTR uBase = 0;
270 RTDBGSEGIDX idxSeg = NIL_RTDBGSEGIDX;
271 int rc = RTDbgAsModuleByAddr(pUnwindCtx->m_hAs, pUnwindCtx->m_State.uPc, &hDbgMod, &uBase, &idxSeg);
272 if (RT_SUCCESS(rc))
273 {
274 dbgfR3UnwindCtxFlushCache(pUnwindCtx);
275 pUnwindCtx->m_hCached = hDbgMod;
276 pUnwindCtx->m_uCachedMapping = uBase;
277 pUnwindCtx->m_idxCachedSegMapping = idxSeg;
278 pUnwindCtx->m_cbCachedMapping = idxSeg == NIL_RTDBGSEGIDX ? RTDbgModImageSize(hDbgMod)
279 : RTDbgModSegmentSize(hDbgMod, idxSeg);
280 offCache = pUnwindCtx->m_State.uPc - uBase;
281 }
282 else
283 return false;
284 }
285
286 /*
287 * Do the lookup.
288 */
289 AssertCompile(UINT32_MAX == NIL_RTDBGSEGIDX);
290 int rc = RTDbgModUnwindFrame(pUnwindCtx->m_hCached, pUnwindCtx->m_idxCachedSegMapping, offCache, &pUnwindCtx->m_State);
291 if (RT_SUCCESS(rc))
292 return true;
293 return false;
294}
295
296
297/**
298 * Read stack memory, will init entire buffer.
299 */
300DECLINLINE(int) dbgfR3StackRead(PUVM pUVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pSrcAddr, size_t cb, size_t *pcbRead)
301{
302 int rc = DBGFR3MemRead(pUVM, idCpu, pSrcAddr, pvBuf, cb);
303 if (RT_FAILURE(rc))
304 {
305 /* fallback: byte by byte and zero the ones we fail to read. */
306 size_t cbRead;
307 for (cbRead = 0; cbRead < cb; cbRead++)
308 {
309 DBGFADDRESS Addr = *pSrcAddr;
310 rc = DBGFR3MemRead(pUVM, idCpu, DBGFR3AddrAdd(&Addr, cbRead), (uint8_t *)pvBuf + cbRead, 1);
311 if (RT_FAILURE(rc))
312 break;
313 }
314 if (cbRead)
315 rc = VINF_SUCCESS;
316 memset((char *)pvBuf + cbRead, 0, cb - cbRead);
317 *pcbRead = cbRead;
318 }
319 else
320 *pcbRead = cb;
321 return rc;
322}
323
324#if !defined(VBOX_VMM_TARGET_ARMV8) /** @todo Unused on ARMv8 for now. */
325/**
326 * Collects sure registers on frame exit.
327 *
328 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
329 * @param pUVM The user mode VM handle for the allocation.
330 * @param pFrame The frame in question.
331 * @param pState The unwind state.
332 */
333static int dbgfR3StackWalkCollectRegisterChanges(PUVM pUVM, PDBGFSTACKFRAME pFrame, PRTDBGUNWINDSTATE pState)
334{
335 pFrame->cSureRegs = 0;
336 pFrame->paSureRegs = NULL;
337
338#if defined(VBOX_VMM_TARGET_ARMV8)
339 if (pState->enmArch == RTLDRARCH_ARM64)
340 {
341 if (pState->u.armv8.Loaded.fAll)
342 {
343 /*
344 * Count relevant registers.
345 */
346 uint32_t cRegs = 0;
347 if (pState->u.armv8.Loaded.s.fRegs)
348 for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.armv8.auGprs)); f <<= 1)
349 if (pState->u.armv8.Loaded.s.fRegs & f)
350 cRegs++;
351 if (pState->u.armv8.Loaded.s.fSpEl0)
352 cRegs++;
353 if (pState->u.armv8.Loaded.s.fSpEl1)
354 cRegs++;
355 if (pState->u.armv8.Loaded.s.fPc)
356 cRegs++;
357 if (cRegs > 0)
358 {
359 /*
360 * Allocate the arrays.
361 */
362 PDBGFREGVALEX paSureRegs = (PDBGFREGVALEX)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(DBGFREGVALEX) * cRegs);
363 AssertReturn(paSureRegs, VERR_NO_MEMORY);
364 pFrame->paSureRegs = paSureRegs;
365 pFrame->cSureRegs = cRegs;
366
367 /*
368 * Popuplate the arrays.
369 */
370 uint32_t iReg = 0;
371 if (pState->u.armv8.Loaded.s.fRegs)
372 for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.armv8.auGprs); i++)
373 if (pState->u.armv8.Loaded.s.fRegs & RT_BIT(i))
374 {
375 paSureRegs[iReg].Value.u64 = pState->u.armv8.auGprs[i];
376 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
377 paSureRegs[iReg].enmReg = (DBGFREG)(DBGFREG_ARMV8_GREG_X0 + i);
378 iReg++;
379 }
380
381 if (iReg < cRegs)
382 {
383 if (pState->u.armv8.Loaded.s.fSpEl0)
384 {
385 paSureRegs[iReg].Value.u64 = pState->u.armv8.uSpEl0;
386 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
387 paSureRegs[iReg].enmReg = DBGFREG_ARMV8_SP_EL0;
388 iReg++;
389 }
390 if (pState->u.armv8.Loaded.s.fSpEl1)
391 {
392 paSureRegs[iReg].Value.u64 = pState->u.armv8.uSpEl1;
393 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
394 paSureRegs[iReg].enmReg = DBGFREG_ARMV8_SP_EL1;
395 iReg++;
396 }
397 if (pState->u.armv8.Loaded.s.fPc)
398 {
399 paSureRegs[iReg].Value.u64 = pState->uPc;
400 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
401 paSureRegs[iReg].enmReg = DBGFREG_ARMV8_PC;
402 iReg++;
403 }
404 }
405 Assert(iReg == cRegs);
406 }
407 }
408 }
409#else
410 if ( pState->enmArch == RTLDRARCH_AMD64
411 || pState->enmArch == RTLDRARCH_X86_32
412 || pState->enmArch == RTLDRARCH_X86_16)
413 {
414 if (pState->u.x86.Loaded.fAll)
415 {
416 /*
417 * Count relevant registers.
418 */
419 uint32_t cRegs = 0;
420 if (pState->u.x86.Loaded.s.fRegs)
421 for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auRegs)); f <<= 1)
422 if (pState->u.x86.Loaded.s.fRegs & f)
423 cRegs++;
424 if (pState->u.x86.Loaded.s.fSegs)
425 for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auSegs)); f <<= 1)
426 if (pState->u.x86.Loaded.s.fSegs & f)
427 cRegs++;
428 if (pState->u.x86.Loaded.s.fRFlags)
429 cRegs++;
430 if (pState->u.x86.Loaded.s.fErrCd)
431 cRegs++;
432 if (cRegs > 0)
433 {
434 /*
435 * Allocate the arrays.
436 */
437 PDBGFREGVALEX paSureRegs = (PDBGFREGVALEX)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(DBGFREGVALEX) * cRegs);
438 AssertReturn(paSureRegs, VERR_NO_MEMORY);
439 pFrame->paSureRegs = paSureRegs;
440 pFrame->cSureRegs = cRegs;
441
442 /*
443 * Popuplate the arrays.
444 */
445 uint32_t iReg = 0;
446 if (pState->u.x86.Loaded.s.fRegs)
447 for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.x86.auRegs); i++)
448 if (pState->u.x86.Loaded.s.fRegs & RT_BIT(i))
449 {
450 paSureRegs[iReg].Value.u64 = pState->u.x86.auRegs[i];
451 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
452 paSureRegs[iReg].enmReg = (DBGFREG)(DBGFREG_RAX + i);
453 iReg++;
454 }
455
456 if (pState->u.x86.Loaded.s.fSegs)
457 for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.x86.auSegs); i++)
458 if (pState->u.x86.Loaded.s.fSegs & RT_BIT(i))
459 {
460 paSureRegs[iReg].Value.u16 = pState->u.x86.auSegs[i];
461 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U16;
462 switch (i)
463 {
464 case X86_SREG_ES: paSureRegs[iReg].enmReg = DBGFREG_ES; break;
465 case X86_SREG_CS: paSureRegs[iReg].enmReg = DBGFREG_CS; break;
466 case X86_SREG_SS: paSureRegs[iReg].enmReg = DBGFREG_SS; break;
467 case X86_SREG_DS: paSureRegs[iReg].enmReg = DBGFREG_DS; break;
468 case X86_SREG_FS: paSureRegs[iReg].enmReg = DBGFREG_FS; break;
469 case X86_SREG_GS: paSureRegs[iReg].enmReg = DBGFREG_GS; break;
470 default: AssertFailedBreak();
471 }
472 iReg++;
473 }
474
475 if (iReg < cRegs)
476 {
477 if (pState->u.x86.Loaded.s.fRFlags)
478 {
479 paSureRegs[iReg].Value.u64 = pState->u.x86.uRFlags;
480 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
481 paSureRegs[iReg].enmReg = DBGFREG_RFLAGS;
482 iReg++;
483 }
484 if (pState->u.x86.Loaded.s.fErrCd)
485 {
486 paSureRegs[iReg].Value.u64 = pState->u.x86.uErrCd;
487 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
488 paSureRegs[iReg].enmReg = DBGFREG_END;
489 paSureRegs[iReg].pszName = "trap-errcd";
490 iReg++;
491 }
492 }
493 Assert(iReg == cRegs);
494 }
495 }
496 }
497#endif
498
499 return VINF_SUCCESS;
500}
501#endif
502
503
504#if defined(VBOX_VMM_TARGET_ARMV8)
505/**
506 * Internal worker routine.
507 *
508 * On aarch64 the typical stack frame layout is like this:
509 * .. ..
510 * 4 return address
511 * 0 old fp; current fp points here
512 */
513DECL_NO_INLINE(static, int) dbgfR3StackWalk(PDBGFUNWINDCTX pUnwindCtx, PDBGFSTACKFRAME pFrame, bool fFirst)
514{
515 /*
516 * Stop if we got a read error in the previous run.
517 */
518 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST)
519 return VERR_NO_MORE_FILES;
520
521 /*
522 * Advance the frame (except for the first).
523 */
524 if (!fFirst) /** @todo we can probably eliminate this fFirst business... */
525 {
526 /* frame, pc and stack is taken from the existing frames return members. */
527 pFrame->AddrFrame = pFrame->AddrReturnFrame;
528 pFrame->AddrPC = pFrame->AddrReturnPC;
529 pFrame->pSymPC = pFrame->pSymReturnPC;
530 pFrame->pLinePC = pFrame->pLineReturnPC;
531
532 /* increment the frame number. */
533 pFrame->iFrame++;
534
535 /* UNWIND_INFO_RET -> USED_UNWIND; return type */
536 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET))
537 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
538 else
539 {
540 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
541 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
542 if (pFrame->enmReturnFrameReturnType != RTDBGRETURNTYPE_INVALID)
543 {
544 pFrame->enmReturnType = pFrame->enmReturnFrameReturnType;
545 pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
546 }
547 }
548 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_TRAP_FRAME;
549 }
550
551 /*
552 * Figure the return address size and use the old PC to guess stack item size.
553 */
554 unsigned const cbRetAddr = 8;
555 unsigned const cbStackItem = 8; /** @todo AARCH32. */
556 PVMCPUCC const pVCpu = pUnwindCtx->m_pUVM->pVM->apCpusR3[pUnwindCtx->m_idCpu];
557
558 /*
559 * Read the raw frame data.
560 * We double cbRetAddr in case we have a far return.
561 */
562 union
563 {
564 uint64_t *pu64;
565 uint32_t *pu32;
566 uint8_t *pb;
567 void *pv;
568 } u, uRet, uArgs, uBp;
569 size_t cbRead = cbRetAddr * 2 + cbStackItem + sizeof(pFrame->Args);
570 u.pv = alloca(cbRead);
571 uBp = u;
572 uRet.pb = u.pb + cbStackItem;
573 uArgs.pb = u.pb + cbStackItem + cbRetAddr;
574
575 Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
576 int rc = dbgfR3StackRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, u.pv, &pFrame->AddrFrame, cbRead, &cbRead);
577 if ( RT_FAILURE(rc)
578 || cbRead < cbRetAddr + cbStackItem)
579 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_LAST;
580
581 /*
582 * Return Frame address.
583 *
584 * If we used unwind info to get here, the unwind register context will be
585 * positioned after the return instruction has been executed. We start by
586 * picking up the rBP register here for return frame and will try improve
587 * on it further down by using unwind info.
588 */
589 pFrame->AddrReturnFrame = pFrame->AddrFrame;
590 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
591 {
592 AssertFailed(); /** @todo */
593 }
594 else
595 {
596 switch (cbStackItem)
597 {
598 case 4: pFrame->AddrReturnFrame.off = *uBp.pu32; break;
599 case 8: pFrame->AddrReturnFrame.off = CPUMGetGCPtrPacStripped(pVCpu, *uBp.pu64); break;
600 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
601 }
602
603 pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
604 }
605
606 /*
607 * Return Stack Address.
608 */
609 pFrame->AddrReturnStack = pFrame->AddrReturnFrame;
610 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
611 {
612 AssertFailed();
613 }
614 else
615 {
616 pFrame->AddrReturnStack.off += cbStackItem + cbRetAddr;
617 pFrame->AddrReturnStack.FlatPtr += cbStackItem + cbRetAddr;
618 }
619
620 /*
621 * Return PC.
622 */
623 pFrame->AddrReturnPC = pFrame->AddrPC;
624 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
625 {
626 AssertFailed();
627 }
628 else
629 {
630 switch (pFrame->enmReturnType)
631 {
632 case RTDBGRETURNTYPE_NEAR64:
633 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
634 {
635 pFrame->AddrReturnPC.FlatPtr += CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64) - pFrame->AddrReturnPC.off;
636 pFrame->AddrReturnPC.off = CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64);
637 }
638 else
639 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64));
640 break;
641 default:
642 AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
643 return VERR_INVALID_PARAMETER;
644 }
645 }
646
647
648 pFrame->pSymReturnPC = DBGFR3AsSymbolByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
649 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
650 NULL /*poffDisp*/, NULL /*phMod*/);
651 pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
652 NULL /*poffDisp*/, NULL /*phMod*/);
653
654 /*
655 * Frame bitness flag.
656 */
657 /** @todo use previous return type for this? */
658 pFrame->fFlags &= ~(DBGFSTACKFRAME_FLAGS_32BIT | DBGFSTACKFRAME_FLAGS_64BIT);
659 switch (cbStackItem)
660 {
661 case 4: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
662 case 8: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
663 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
664 }
665
666 /*
667 * The arguments.
668 */
669 memcpy(&pFrame->Args, uArgs.pv, sizeof(pFrame->Args));
670
671 /*
672 * Collect register changes.
673 * Then call the OS layer to assist us (e.g. NT trap frames).
674 */
675 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
676 {
677 AssertFailed();
678 }
679
680 /*
681 * Try use unwind information to locate the return frame pointer (for the
682 * next loop iteration).
683 */
684 Assert(!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET));
685 pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
686 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST))
687 {
688 /* Set PC and SP if we didn't unwind our way here (context will then point
689 and the return PC and SP already). */
690 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
691 {
692 dbgfR3UnwindCtxSetPcAndSp(pUnwindCtx, &pFrame->AddrReturnPC, &pFrame->AddrReturnStack);
693 pUnwindCtx->m_State.u.armv8.auGprs[ARMV8_A64_REG_BP] = pFrame->AddrReturnFrame.off;
694 }
695 if (pUnwindCtx->m_State.enmArch == RTLDRARCH_ARM64)
696 pUnwindCtx->m_State.u.armv8.Loaded.fAll = 0;
697 else
698 AssertFailed();
699 if (dbgfR3UnwindCtxDoOneFrame(pUnwindCtx))
700 {
701 Assert(!pUnwindCtx->m_fIsHostRing0);
702
703 DBGFADDRESS AddrReturnFrame = pFrame->AddrReturnFrame;
704 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &AddrReturnFrame, pUnwindCtx->m_State.u.armv8.FrameAddr);
705 pFrame->AddrReturnFrame = AddrReturnFrame;
706
707 pFrame->enmReturnFrameReturnType = pUnwindCtx->m_State.enmRetType;
708 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
709 }
710 }
711
712 return VINF_SUCCESS;
713}
714#else
715/**
716 * Internal worker routine.
717 *
718 * On x86 the typical stack frame layout is like this:
719 * .. ..
720 * 16 parameter 2
721 * 12 parameter 1
722 * 8 parameter 0
723 * 4 return address
724 * 0 old ebp; current ebp points here
725 */
726DECL_NO_INLINE(static, int) dbgfR3StackWalk(PDBGFUNWINDCTX pUnwindCtx, PDBGFSTACKFRAME pFrame, bool fFirst)
727{
728 /*
729 * Stop if we got a read error in the previous run.
730 */
731 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST)
732 return VERR_NO_MORE_FILES;
733
734 /*
735 * Advance the frame (except for the first).
736 */
737 if (!fFirst) /** @todo we can probably eliminate this fFirst business... */
738 {
739 /* frame, pc and stack is taken from the existing frames return members. */
740 pFrame->AddrFrame = pFrame->AddrReturnFrame;
741 pFrame->AddrPC = pFrame->AddrReturnPC;
742 pFrame->pSymPC = pFrame->pSymReturnPC;
743 pFrame->pLinePC = pFrame->pLineReturnPC;
744
745 /* increment the frame number. */
746 pFrame->iFrame++;
747
748 /* UNWIND_INFO_RET -> USED_UNWIND; return type */
749 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET))
750 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
751 else
752 {
753 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
754 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
755 if (pFrame->enmReturnFrameReturnType != RTDBGRETURNTYPE_INVALID)
756 {
757 pFrame->enmReturnType = pFrame->enmReturnFrameReturnType;
758 pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
759 }
760 }
761 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_TRAP_FRAME;
762 }
763
764 /*
765 * Figure the return address size and use the old PC to guess stack item size.
766 */
767 /** @todo this is bogus... */
768 unsigned cbRetAddr = RTDbgReturnTypeSize(pFrame->enmReturnType);
769 unsigned cbStackItem;
770 switch (pFrame->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
771 {
772 case DBGFADDRESS_FLAGS_FAR16: cbStackItem = 2; break;
773 case DBGFADDRESS_FLAGS_FAR32: cbStackItem = 4; break;
774 case DBGFADDRESS_FLAGS_FAR64: cbStackItem = 8; break;
775 case DBGFADDRESS_FLAGS_RING0: cbStackItem = sizeof(RTHCUINTPTR); break;
776 default:
777 switch (pFrame->enmReturnType)
778 {
779 case RTDBGRETURNTYPE_FAR16:
780 case RTDBGRETURNTYPE_IRET16:
781 case RTDBGRETURNTYPE_IRET32_V86:
782 case RTDBGRETURNTYPE_NEAR16: cbStackItem = 2; break;
783
784 case RTDBGRETURNTYPE_FAR32:
785 case RTDBGRETURNTYPE_IRET32:
786 case RTDBGRETURNTYPE_IRET32_PRIV:
787 case RTDBGRETURNTYPE_NEAR32: cbStackItem = 4; break;
788
789 case RTDBGRETURNTYPE_FAR64:
790 case RTDBGRETURNTYPE_IRET64:
791 case RTDBGRETURNTYPE_NEAR64: cbStackItem = 8; break;
792
793 default:
794 AssertMsgFailed(("%d\n", pFrame->enmReturnType));
795 cbStackItem = 4;
796 break;
797 }
798 }
799
800 /*
801 * Read the raw frame data.
802 * We double cbRetAddr in case we have a far return.
803 */
804 union
805 {
806 uint64_t *pu64;
807 uint32_t *pu32;
808 uint16_t *pu16;
809 uint8_t *pb;
810 void *pv;
811 } u, uRet, uArgs, uBp;
812 size_t cbRead = cbRetAddr*2 + cbStackItem + sizeof(pFrame->Args);
813 u.pv = alloca(cbRead);
814 uBp = u;
815 uRet.pb = u.pb + cbStackItem;
816 uArgs.pb = u.pb + cbStackItem + cbRetAddr;
817
818 Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
819 int rc = dbgfR3StackRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, u.pv, &pFrame->AddrFrame, cbRead, &cbRead);
820 if ( RT_FAILURE(rc)
821 || cbRead < cbRetAddr + cbStackItem)
822 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_LAST;
823
824 /*
825 * Return Frame address.
826 *
827 * If we used unwind info to get here, the unwind register context will be
828 * positioned after the return instruction has been executed. We start by
829 * picking up the rBP register here for return frame and will try improve
830 * on it further down by using unwind info.
831 */
832 pFrame->AddrReturnFrame = pFrame->AddrFrame;
833 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
834 {
835 if ( pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
836 || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
837 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnFrame,
838 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
839 else if (pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_V86)
840 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnFrame,
841 ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
842 + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
843 else
844 {
845 pFrame->AddrReturnFrame.off = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP];
846 pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
847 }
848 }
849 else
850 {
851 switch (cbStackItem)
852 {
853 case 2: pFrame->AddrReturnFrame.off = *uBp.pu16; break;
854 case 4: pFrame->AddrReturnFrame.off = *uBp.pu32; break;
855 case 8: pFrame->AddrReturnFrame.off = *uBp.pu64; break;
856 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
857 }
858
859 /* Watcom tries to keep the frame pointer odd for far returns. */
860 if ( cbStackItem <= 4
861 && !(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
862 {
863 if (pFrame->AddrReturnFrame.off & 1)
864 {
865 pFrame->AddrReturnFrame.off &= ~(RTGCUINTPTR)1;
866 if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR16)
867 {
868 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
869 pFrame->enmReturnType = RTDBGRETURNTYPE_FAR16;
870 cbRetAddr = 4;
871 }
872 else if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR32)
873 {
874#if 1
875 /* Assumes returning 32-bit code. */
876 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
877 pFrame->enmReturnType = RTDBGRETURNTYPE_FAR32;
878 cbRetAddr = 8;
879#else
880 /* Assumes returning 16-bit code. */
881 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
882 pFrame->enmReturnType = RTDBGRETURNTYPE_FAR16;
883 cbRetAddr = 4;
884#endif
885 }
886 }
887 else if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN)
888 {
889 if (pFrame->enmReturnType == RTDBGRETURNTYPE_FAR16)
890 {
891 pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR16;
892 cbRetAddr = 2;
893 }
894 else if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR32)
895 {
896 pFrame->enmReturnType = RTDBGRETURNTYPE_FAR32;
897 cbRetAddr = 4;
898 }
899 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
900 }
901 uArgs.pb = u.pb + cbStackItem + cbRetAddr;
902 }
903
904 pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
905 }
906
907 /*
908 * Return Stack Address.
909 */
910 pFrame->AddrReturnStack = pFrame->AddrReturnFrame;
911 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
912 {
913 if ( pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
914 || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
915 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnStack,
916 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
917 else if (pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_V86)
918 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnStack,
919 ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
920 + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
921 else
922 {
923 pFrame->AddrReturnStack.off = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP];
924 pFrame->AddrReturnStack.FlatPtr += pFrame->AddrReturnStack.off - pFrame->AddrStack.off;
925 }
926 }
927 else
928 {
929 pFrame->AddrReturnStack.off += cbStackItem + cbRetAddr;
930 pFrame->AddrReturnStack.FlatPtr += cbStackItem + cbRetAddr;
931 }
932
933 /*
934 * Return PC.
935 */
936 pFrame->AddrReturnPC = pFrame->AddrPC;
937 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
938 {
939 if (RTDbgReturnTypeIsNear(pFrame->enmReturnType))
940 {
941 pFrame->AddrReturnPC.off = pUnwindCtx->m_State.uPc;
942 pFrame->AddrReturnPC.FlatPtr += pFrame->AddrReturnPC.off - pFrame->AddrPC.off;
943 }
944 else
945 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC,
946 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS], pUnwindCtx->m_State.uPc);
947 }
948 else
949 {
950 int rc2;
951 switch (pFrame->enmReturnType)
952 {
953 case RTDBGRETURNTYPE_NEAR16:
954 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
955 {
956 pFrame->AddrReturnPC.FlatPtr += *uRet.pu16 - pFrame->AddrReturnPC.off;
957 pFrame->AddrReturnPC.off = *uRet.pu16;
958 }
959 else
960 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu16);
961 break;
962 case RTDBGRETURNTYPE_NEAR32:
963 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
964 {
965 pFrame->AddrReturnPC.FlatPtr += *uRet.pu32 - pFrame->AddrReturnPC.off;
966 pFrame->AddrReturnPC.off = *uRet.pu32;
967 }
968 else
969 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu32);
970 break;
971 case RTDBGRETURNTYPE_NEAR64:
972 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
973 {
974 pFrame->AddrReturnPC.FlatPtr += *uRet.pu64 - pFrame->AddrReturnPC.off;
975 pFrame->AddrReturnPC.off = *uRet.pu64;
976 }
977 else
978 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu64);
979 break;
980 case RTDBGRETURNTYPE_FAR16:
981 rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
982 if (RT_SUCCESS(rc2))
983 break;
984 rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, pFrame->AddrPC.Sel, uRet.pu16[0]);
985 if (RT_SUCCESS(rc2))
986 pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR16;
987 else
988 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
989 break;
990 case RTDBGRETURNTYPE_FAR32:
991 rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
992 if (RT_SUCCESS(rc2))
993 break;
994 rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, pFrame->AddrPC.Sel, uRet.pu32[0]);
995 if (RT_SUCCESS(rc2))
996 pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR32;
997 else
998 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
999 break;
1000 case RTDBGRETURNTYPE_FAR64:
1001 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
1002 break;
1003 case RTDBGRETURNTYPE_IRET16:
1004 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
1005 break;
1006 case RTDBGRETURNTYPE_IRET32:
1007 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1008 break;
1009 case RTDBGRETURNTYPE_IRET32_PRIV:
1010 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1011 break;
1012 case RTDBGRETURNTYPE_IRET32_V86:
1013 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1014 break;
1015 case RTDBGRETURNTYPE_IRET64:
1016 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
1017 break;
1018 default:
1019 AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
1020 return VERR_INVALID_PARAMETER;
1021 }
1022 }
1023
1024
1025 pFrame->pSymReturnPC = DBGFR3AsSymbolByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
1026 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1027 NULL /*poffDisp*/, NULL /*phMod*/);
1028 pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
1029 NULL /*poffDisp*/, NULL /*phMod*/);
1030
1031 /*
1032 * Frame bitness flag.
1033 */
1034 /** @todo use previous return type for this? */
1035 pFrame->fFlags &= ~(DBGFSTACKFRAME_FLAGS_16BIT | DBGFSTACKFRAME_FLAGS_32BIT | DBGFSTACKFRAME_FLAGS_64BIT);
1036 switch (cbStackItem)
1037 {
1038 case 2: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_16BIT; break;
1039 case 4: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
1040 case 8: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
1041 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
1042 }
1043
1044 /*
1045 * The arguments.
1046 */
1047 memcpy(&pFrame->Args, uArgs.pv, sizeof(pFrame->Args));
1048
1049 /*
1050 * Collect register changes.
1051 * Then call the OS layer to assist us (e.g. NT trap frames).
1052 */
1053 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
1054 {
1055 rc = dbgfR3StackWalkCollectRegisterChanges(pUnwindCtx->m_pUVM, pFrame, &pUnwindCtx->m_State);
1056 if (RT_FAILURE(rc))
1057 return rc;
1058
1059 if ( pUnwindCtx->m_pInitialCtx
1060 && pUnwindCtx->m_hAs != NIL_RTDBGAS)
1061 {
1062 rc = dbgfR3OSStackUnwindAssist(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, pFrame, &pUnwindCtx->m_State,
1063 pUnwindCtx->m_pInitialCtx, pUnwindCtx->m_hAs, &pUnwindCtx->m_uOsScratch);
1064 if (RT_FAILURE(rc))
1065 return rc;
1066 }
1067 }
1068
1069 /*
1070 * Try use unwind information to locate the return frame pointer (for the
1071 * next loop iteration).
1072 */
1073 Assert(!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET));
1074 pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
1075 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST))
1076 {
1077 /* Set PC and SP if we didn't unwind our way here (context will then point
1078 and the return PC and SP already). */
1079 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
1080 {
1081 dbgfR3UnwindCtxSetPcAndSp(pUnwindCtx, &pFrame->AddrReturnPC, &pFrame->AddrReturnStack);
1082 pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP] = pFrame->AddrReturnFrame.off;
1083 }
1084 /** @todo Reevaluate CS if the previous frame return type isn't near. */
1085 if ( pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
1086 || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32
1087 || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_16)
1088 pUnwindCtx->m_State.u.x86.Loaded.fAll = 0;
1089 else
1090 AssertFailed();
1091 if (dbgfR3UnwindCtxDoOneFrame(pUnwindCtx))
1092 {
1093 if (pUnwindCtx->m_fIsHostRing0)
1094 DBGFR3AddrFromHostR0(&pFrame->AddrReturnFrame, pUnwindCtx->m_State.u.x86.FrameAddr.off);
1095 else
1096 {
1097 DBGFADDRESS AddrReturnFrame = pFrame->AddrReturnFrame;
1098 rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &AddrReturnFrame,
1099 pUnwindCtx->m_State.u.x86.FrameAddr.sel, pUnwindCtx->m_State.u.x86.FrameAddr.off);
1100 if (RT_SUCCESS(rc))
1101 pFrame->AddrReturnFrame = AddrReturnFrame;
1102 }
1103 pFrame->enmReturnFrameReturnType = pUnwindCtx->m_State.enmRetType;
1104 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
1105 }
1106 }
1107
1108 return VINF_SUCCESS;
1109}
1110#endif
1111
1112
/**
 * Walks the entire stack, allocating a frame structure for each frame as we
 * walk.
 *
 * Executed on the EMT of the given VCPU (dispatched via
 * VMR3ReqPriorityCallWaitU by dbgfR3StackWalkBeginCommon).  The frames are
 * chained through pNextInternal with pFirstInternal pointing at the head, and
 * the whole list must be released with DBGFR3StackWalkEnd.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The ID of the virtual CPU whose stack to walk.
 * @param   pCtx            The register context to start from; NULL for
 *                          ring-0 walks (see dbgfR3StackWalkBeginCommon).
 * @param   hAs             Address space handle for symbol and line lookups.
 * @param   enmCodeType     Guest / hyper / ring-0 code type.
 * @param   pAddrFrame      Frame address to start at.  Optional (NULL).
 * @param   pAddrStack      Stack address to start at.  Optional (NULL).
 * @param   pAddrPC         Program counter to start at.  Optional (NULL).
 * @param   penmReturnType  Pointer to the return address type; passed by
 *                          pointer because of darwin/arm64 calling convention
 *                          trouble, see @bugref{10725}.
 * @param   ppFirstFrame    Where to return the pointer to the first frame.
 */
static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pCtx, RTDBGAS hAs,
                                                DBGFCODETYPE enmCodeType,
                                                PCDBGFADDRESS pAddrFrame,
                                                PCDBGFADDRESS pAddrStack,
                                                PCDBGFADDRESS pAddrPC,
                                                RTDBGRETURNTYPE const *penmReturnType,
                                                PCDBGFSTACKFRAME *ppFirstFrame)
{
    RTDBGRETURNTYPE const enmReturnType = *penmReturnType; /* darwin/arm64 fun, see @bugref{10725} */
    DBGFUNWINDCTX UnwindCtx(pUVM, idCpu, pCtx, hAs);

    /* alloc first frame (zeroed - MMR3HeapAllocZU - so flags/pointers start clean). */
    PDBGFSTACKFRAME pCur = (PDBGFSTACKFRAME)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pCur));
    if (!pCur)
        return VERR_NO_MEMORY;

    /*
     * Initialize the frame.
     */
    pCur->pNextInternal = NULL;
    pCur->pFirstInternal = pCur;

    /*
     * Starting PC: explicit override, flat RIP/PC, or CS:RIP for guest code.
     */
    int rc = VINF_SUCCESS;
#if defined(VBOX_VMM_TARGET_ARMV8)
    if (pAddrPC)
        pCur->AddrPC = *pAddrPC;
    else
        DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->Pc.u64);
#else
    if (pAddrPC)
        pCur->AddrPC = *pAddrPC;
    else if (enmCodeType != DBGFCODETYPE_GUEST)
        DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->rip);
    else
        rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrPC, pCtx->cs.Sel, pCtx->rip);
#endif
    if (RT_SUCCESS(rc))
    {
        /*
         * Work out the address mask (width of usable stack/frame addresses)
         * and, when the caller didn't specify one, a default return type.
         */
        uint64_t fAddrMask;
        if (enmCodeType == DBGFCODETYPE_RING0)
            fAddrMask = HC_ARCH_BITS == 64 ? UINT64_MAX : UINT32_MAX;
        else if (enmCodeType == DBGFCODETYPE_HYPER)
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR16(&pCur->AddrPC))
            fAddrMask = UINT16_MAX;
        else if (DBGFADDRESS_IS_FAR32(&pCur->AddrPC))
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR64(&pCur->AddrPC))
            fAddrMask = UINT64_MAX;
        else
        {
            /* No far address hints; fall back on the current guest CPU mode. */
            PVMCPU const pVCpu = pUVM->pVM->apCpusR3[idCpu];
            CPUMMODE const enmCpuMode = CPUMGetGuestMode(pVCpu);
#if defined(VBOX_VMM_TARGET_ARMV8)
            /** @todo */
            Assert(enmCpuMode == CPUMMODE_ARMV8_AARCH64); RT_NOREF(enmCpuMode);
            fAddrMask = UINT64_MAX;
            if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64;
#else
            if (enmCpuMode == CPUMMODE_REAL)
            {
                fAddrMask = UINT16_MAX;
                if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR16;
            }
            else if (   enmCpuMode == CPUMMODE_PROTECTED
                     || !CPUMIsGuestIn64BitCode(pVCpu))
            {
                fAddrMask = UINT32_MAX;
                if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32;
            }
            else
            {
                fAddrMask = UINT64_MAX;
                if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64;
            }
#endif
        }

#if !defined(VBOX_VMM_TARGET_ARMV8)
        /* If still unspecified, derive the return type from the PC address flavour. */
        if (enmReturnType == RTDBGRETURNTYPE_INVALID)
            switch (pCur->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
            {
                case DBGFADDRESS_FLAGS_FAR16: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR16; break;
                case DBGFADDRESS_FLAGS_FAR32: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32; break;
                case DBGFADDRESS_FLAGS_FAR64: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64; break;
                case DBGFADDRESS_FLAGS_RING0:
                    pCur->enmReturnType = HC_ARCH_BITS == 64 ? RTDBGRETURNTYPE_NEAR64 : RTDBGRETURNTYPE_NEAR32;
                    break;
                default:
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32;
                    break;
            }
#endif


        /*
         * Starting stack and frame addresses: explicit override or registers,
         * masked down to the address width established above.
         */
#if defined(VBOX_VMM_TARGET_ARMV8)
        if (pAddrStack)
            pCur->AddrStack = *pAddrStack;
        else
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->aSpReg[1].u64 & fAddrMask); /** @todo EL0 stack. */

        Assert(!(pCur->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO));
        if (pAddrFrame)
            pCur->AddrFrame = *pAddrFrame;
        else
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->aGRegs[ARMV8_A64_REG_BP].x & fAddrMask);
#else
        if (pAddrStack)
            pCur->AddrStack = *pAddrStack;
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->rsp & fAddrMask);
        else
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrStack, pCtx->ss.Sel, pCtx->rsp & fAddrMask);

        Assert(!(pCur->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO));
        if (pAddrFrame)
            pCur->AddrFrame = *pAddrFrame;
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->rbp & fAddrMask);
        else if (RT_SUCCESS(rc))
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrFrame, pCtx->ss.Sel, pCtx->rbp & fAddrMask);
#endif

        /*
         * Try unwind and get a better frame pointer and state.
         */
        if (   RT_SUCCESS(rc)
            && dbgfR3UnwindCtxSetPcAndSp(&UnwindCtx, &pCur->AddrPC, &pCur->AddrStack)
            && dbgfR3UnwindCtxDoOneFrame(&UnwindCtx))
        {
            pCur->enmReturnType = UnwindCtx.m_State.enmRetType;
            pCur->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
#if defined(VBOX_VMM_TARGET_ARMV8)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, UnwindCtx.m_State.u.armv8.FrameAddr);
#else
            if (!UnwindCtx.m_fIsHostRing0)
                rc = DBGFR3AddrFromSelOff(UnwindCtx.m_pUVM, UnwindCtx.m_idCpu, &pCur->AddrFrame,
                                          UnwindCtx.m_State.u.x86.FrameAddr.sel, UnwindCtx.m_State.u.x86.FrameAddr.off);
            else
                DBGFR3AddrFromHostR0(&pCur->AddrFrame, UnwindCtx.m_State.u.x86.FrameAddr.off);
#endif
        }
        /*
         * The first frame: resolve PC symbol/line info, then walk it.
         */
        if (RT_SUCCESS(rc))
        {
            if (DBGFADDRESS_IS_VALID(&pCur->AddrPC))
            {
                pCur->pSymPC = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pCur->AddrPC,
                                                     RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
                                                     NULL /*poffDisp*/, NULL /*phMod*/);
                pCur->pLinePC = DBGFR3AsLineByAddrA(pUVM, hAs, &pCur->AddrPC, NULL /*poffDisp*/, NULL /*phMod*/);
            }

            rc = dbgfR3StackWalk(&UnwindCtx, pCur, true /*fFirst*/);
        }
    }
    else
        pCur->enmReturnType = enmReturnType;
    if (RT_FAILURE(rc))
    {
        /* DBGFR3StackWalkEnd frees the partially built chain. */
        DBGFR3StackWalkEnd(pCur);
        return rc;
    }

    /*
     * The other frames.  'Next' is a stack-local scratch frame; each
     * successful walk is copied into a freshly allocated chain entry.
     */
    DBGFSTACKFRAME Next = *pCur;
    while (!(pCur->fFlags & (DBGFSTACKFRAME_FLAGS_LAST | DBGFSTACKFRAME_FLAGS_MAX_DEPTH | DBGFSTACKFRAME_FLAGS_LOOP)))
    {
        Next.cSureRegs = 0;
        Next.paSureRegs = NULL;

        /* try walk. */
        rc = dbgfR3StackWalk(&UnwindCtx, &Next, false /*fFirst*/);
        if (RT_FAILURE(rc))
            break;

        /* add the next frame to the chain. */
        PDBGFSTACKFRAME pNext = (PDBGFSTACKFRAME)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pNext));
        if (!pNext)
        {
            DBGFR3StackWalkEnd(pCur);
            return VERR_NO_MEMORY;
        }
        *pNext = Next;
        pCur->pNextInternal = pNext;
        pCur = pNext;
        Assert(pCur->pNextInternal == NULL);

        /* check for loop: same flat frame address seen earlier in the chain. */
        for (PCDBGFSTACKFRAME pLoop = pCur->pFirstInternal;
             pLoop && pLoop != pCur;
             pLoop = pLoop->pNextInternal)
            if (pLoop->AddrFrame.FlatPtr == pCur->AddrFrame.FlatPtr)
            {
                pCur->fFlags |= DBGFSTACKFRAME_FLAGS_LOOP;
                break;
            }

        /* check for insane recursion */
        if (pCur->iFrame >= 2048)
            pCur->fFlags |= DBGFSTACKFRAME_FLAGS_MAX_DEPTH;
    }

    *ppFirstFrame = pCur->pFirstInternal;
    return rc;
}
1330
1331
1332/**
1333 * Common worker for DBGFR3StackWalkBeginGuestEx, DBGFR3StackWalkBeginHyperEx,
1334 * DBGFR3StackWalkBeginGuest and DBGFR3StackWalkBeginHyper.
1335 */
1336static int dbgfR3StackWalkBeginCommon(PUVM pUVM,
1337 VMCPUID idCpu,
1338 DBGFCODETYPE enmCodeType,
1339 PCDBGFADDRESS pAddrFrame,
1340 PCDBGFADDRESS pAddrStack,
1341 PCDBGFADDRESS pAddrPC,
1342 RTDBGRETURNTYPE enmReturnType,
1343 PCDBGFSTACKFRAME *ppFirstFrame)
1344{
1345 /*
1346 * Validate parameters.
1347 */
1348 *ppFirstFrame = NULL;
1349 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1350 PVM pVM = pUVM->pVM;
1351 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1352 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1353 if (pAddrFrame)
1354 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrFrame), VERR_INVALID_PARAMETER);
1355 if (pAddrStack)
1356 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrStack), VERR_INVALID_PARAMETER);
1357 if (pAddrPC)
1358 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrPC), VERR_INVALID_PARAMETER);
1359 AssertReturn(enmReturnType >= RTDBGRETURNTYPE_INVALID && enmReturnType < RTDBGRETURNTYPE_END, VERR_INVALID_PARAMETER);
1360
1361 /*
1362 * Get the CPUM context pointer and pass it on the specified EMT.
1363 */
1364 RTDBGAS hAs;
1365 PCCPUMCTX pCtx;
1366 switch (enmCodeType)
1367 {
1368 case DBGFCODETYPE_GUEST:
1369 pCtx = CPUMQueryGuestCtxPtr(pVM->apCpusR3[idCpu]);
1370 hAs = DBGF_AS_GLOBAL;
1371 break;
1372 case DBGFCODETYPE_HYPER:
1373 pCtx = CPUMQueryGuestCtxPtr(pVM->apCpusR3[idCpu]);
1374 hAs = DBGF_AS_RC_AND_GC_GLOBAL;
1375 break;
1376 case DBGFCODETYPE_RING0:
1377 pCtx = NULL; /* No valid context present. */
1378 hAs = DBGF_AS_R0;
1379 break;
1380 default:
1381 AssertFailedReturn(VERR_INVALID_PARAMETER);
1382 }
1383 return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3StackWalkCtxFull, 10 | VMREQ_F_EXTRA_ARGS_ALL_PTRS,
1384 pUVM, idCpu, pCtx, hAs, enmCodeType, pAddrFrame, pAddrStack, pAddrPC,
1385 &enmReturnType, ppFirstFrame);
1386}
1387
1388
1389/**
1390 * Begins a guest stack walk, extended version.
1391 *
1392 * This will walk the current stack, constructing a list of info frames which is
1393 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1394 * list and DBGFR3StackWalkEnd to release it.
1395 *
1396 * @returns VINF_SUCCESS on success.
1397 * @returns VERR_NO_MEMORY if we're out of memory.
1398 *
1399 * @param pUVM The user mode VM handle.
1400 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1401 * @param enmCodeType Code type
1402 * @param pAddrFrame Frame address to start at. (Optional)
1403 * @param pAddrStack Stack address to start at. (Optional)
1404 * @param pAddrPC Program counter to start at. (Optional)
1405 * @param enmReturnType The return address type. (Optional)
1406 * @param ppFirstFrame Where to return the pointer to the first info frame.
1407 */
1408VMMR3DECL(int) DBGFR3StackWalkBeginEx(PUVM pUVM,
1409 VMCPUID idCpu,
1410 DBGFCODETYPE enmCodeType,
1411 PCDBGFADDRESS pAddrFrame,
1412 PCDBGFADDRESS pAddrStack,
1413 PCDBGFADDRESS pAddrPC,
1414 RTDBGRETURNTYPE enmReturnType,
1415 PCDBGFSTACKFRAME *ppFirstFrame)
1416{
1417 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
1418}
1419
1420
1421/**
1422 * Begins a guest stack walk.
1423 *
1424 * This will walk the current stack, constructing a list of info frames which is
1425 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1426 * list and DBGFR3StackWalkEnd to release it.
1427 *
1428 * @returns VINF_SUCCESS on success.
1429 * @returns VERR_NO_MEMORY if we're out of memory.
1430 *
1431 * @param pUVM The user mode VM handle.
1432 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1433 * @param enmCodeType Code type
1434 * @param ppFirstFrame Where to return the pointer to the first info frame.
1435 */
1436VMMR3DECL(int) DBGFR3StackWalkBegin(PUVM pUVM, VMCPUID idCpu, DBGFCODETYPE enmCodeType, PCDBGFSTACKFRAME *ppFirstFrame)
1437{
1438 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, NULL, NULL, NULL, RTDBGRETURNTYPE_INVALID, ppFirstFrame);
1439}
1440
1441/**
1442 * Gets the next stack frame.
1443 *
1444 * @returns Pointer to the info for the next stack frame.
1445 * NULL if no more frames.
1446 *
1447 * @param pCurrent Pointer to the current stack frame.
1448 *
1449 */
1450VMMR3DECL(PCDBGFSTACKFRAME) DBGFR3StackWalkNext(PCDBGFSTACKFRAME pCurrent)
1451{
1452 return pCurrent
1453 ? pCurrent->pNextInternal
1454 : NULL;
1455}
1456
1457
1458/**
1459 * Ends a stack walk process.
1460 *
1461 * This *must* be called after a successful first call to any of the stack
1462 * walker functions. If not called we will leak memory or other resources.
1463 *
1464 * @param pFirstFrame The frame returned by one of the begin functions.
1465 */
1466VMMR3DECL(void) DBGFR3StackWalkEnd(PCDBGFSTACKFRAME pFirstFrame)
1467{
1468 if ( !pFirstFrame
1469 || !pFirstFrame->pFirstInternal)
1470 return;
1471
1472 PDBGFSTACKFRAME pFrame = (PDBGFSTACKFRAME)pFirstFrame->pFirstInternal;
1473 while (pFrame)
1474 {
1475 PDBGFSTACKFRAME pCur = pFrame;
1476 pFrame = (PDBGFSTACKFRAME)pCur->pNextInternal;
1477 if (pFrame)
1478 {
1479 if (pCur->pSymReturnPC == pFrame->pSymPC)
1480 pFrame->pSymPC = NULL;
1481 if (pCur->pSymReturnPC == pFrame->pSymReturnPC)
1482 pFrame->pSymReturnPC = NULL;
1483
1484 if (pCur->pSymPC == pFrame->pSymPC)
1485 pFrame->pSymPC = NULL;
1486 if (pCur->pSymPC == pFrame->pSymReturnPC)
1487 pFrame->pSymReturnPC = NULL;
1488
1489 if (pCur->pLineReturnPC == pFrame->pLinePC)
1490 pFrame->pLinePC = NULL;
1491 if (pCur->pLineReturnPC == pFrame->pLineReturnPC)
1492 pFrame->pLineReturnPC = NULL;
1493
1494 if (pCur->pLinePC == pFrame->pLinePC)
1495 pFrame->pLinePC = NULL;
1496 if (pCur->pLinePC == pFrame->pLineReturnPC)
1497 pFrame->pLineReturnPC = NULL;
1498 }
1499
1500 RTDbgSymbolFree(pCur->pSymPC);
1501 RTDbgSymbolFree(pCur->pSymReturnPC);
1502 RTDbgLineFree(pCur->pLinePC);
1503 RTDbgLineFree(pCur->pLineReturnPC);
1504
1505 if (pCur->paSureRegs)
1506 {
1507 MMR3HeapFree(pCur->paSureRegs);
1508 pCur->paSureRegs = NULL;
1509 pCur->cSureRegs = 0;
1510 }
1511
1512 pCur->pNextInternal = NULL;
1513 pCur->pFirstInternal = NULL;
1514 pCur->fFlags = 0;
1515 MMR3HeapFree(pCur);
1516 }
1517}
1518
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette