VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFStack.cpp@ 73446

Last change on this file since 73446 was 73446, checked in by vboxsync, 6 years ago

DBGFStack.cpp: Refactored the code in prep for IPRT move. Added a set of 'sure' registers to the frames so we can display non-volatile register changes. [build fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 62.6 KB
Line 
1/* $Id: DBGFStack.cpp 73446 2018-08-02 10:59:46Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Call Stack Analyser.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DBGF
23#include <VBox/vmm/dbgf.h>
24#include <VBox/vmm/selm.h>
25#include <VBox/vmm/mm.h>
26#include "DBGFInternal.h"
27#include <VBox/vmm/vm.h>
28#include <VBox/vmm/uvm.h>
29#include <VBox/err.h>
30#include <VBox/log.h>
31#include <iprt/param.h>
32#include <iprt/assert.h>
33#include <iprt/alloca.h>
34#include <iprt/mem.h>
35#include <iprt/string.h>
36#include <iprt/formats/pecoff.h>
37
38
39/*********************************************************************************************************************************
40* Structures and Typedefs *
41*********************************************************************************************************************************/
42/** Magic value for DBGFUNWINDSTATE::u32Magic (James Moody). */
43#define DBGFUNWINDSTATE_MAGIC UINT32_C(0x19250326)
44/** Magic value for DBGFUNWINDSTATE::u32Magic after use. */
45#define DBGFUNWINDSTATE_MAGIC_DEAD UINT32_C(0x20101209)
46
47/**
48 * Register state.
49 */
50typedef struct DBGFUNWINDSTATE
51{
52 /** Structure magic (DBGFUNWINDSTATE_MAGIC) */
53 uint32_t u32Magic;
54 /** The state architecture. */
55 RTLDRARCH enmArch;
56
57 /** The program counter register.
58 * amd64/x86: RIP/EIP/IP
59 * sparc: PC
60 * arm32: PC / R15
61 */
62 uint64_t uPc;
63
64 /** Return type. */
65 DBGFRETRUNTYPE enmRetType;
66
67 /** Register state (see enmArch). */
68 union
69 {
70 /** RTLDRARCH_AMD64, RTLDRARCH_X86_32 and RTLDRARCH_X86_16. */
71 struct
72 {
73 /** General purpose registers indexed by X86_GREG_XXX. */
74 uint64_t auRegs[16];
75 /** The frame address. */
76 RTFAR64 FrameAddr;
77 /** Set if we're in real or virtual 8086 mode. */
78 bool fRealOrV86;
79 /** The flags register. */
80 uint64_t uRFlags;
81 /** Trap error code. */
82 uint64_t uErrCd;
83 /** Segment registers (indexed by X86_SREG_XXX). */
84 uint16_t auSegs[6];
85
86 /** Bitmap tracking register we've loaded and which content can possibly be trusted. */
87 union
88 {
89 /** For effective clearing of the bits. */
90 uint32_t fAll;
91 /** Detailed view. */
92 struct
93 {
94 /** Bitmap indicating whether a GPR was loaded (parallel to auRegs). */
95 uint16_t fRegs;
96 /** Bitmap indicating whether a segment register was loaded (parallel to auSegs). */
97 uint8_t fSegs;
98 /** Set if uPc was loaded. */
99 uint8_t fPc : 1;
100 /** Set if FrameAddr was loaded. */
101 uint8_t fFrameAddr : 1;
102 /** Set if uRFlags was loaded. */
103 uint8_t fRFlags : 1;
104 /** Set if uErrCd was loaded. */
105 uint8_t fErrCd : 1;
106 } s;
107 } Loaded;
108 } x86;
109
110 /** @todo add ARM and others as needed. */
111 } u;
112
113 /**
114 * Stack read callback.
115 *
116 * @returns IPRT status code.
117 * @param pThis Pointer to this structure.
118 * @param uSp The stack pointer address.
119 * @param cbToRead The number of bytes to read.
120 * @param pvDst Where to put the bytes we read.
121 */
122 DECLCALLBACKMEMBER(int, pfnReadStack)(struct DBGFUNWINDSTATE *pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst);
123 /** User argument (usefule for pfnReadStack). */
124 void *pvUser;
125
126} DBGFUNWINDSTATE;
127typedef struct DBGFUNWINDSTATE *PDBGFUNWINDSTATE;
128typedef struct DBGFUNWINDSTATE const *PCDBGFUNWINDSTATE;
129
130static DECLCALLBACK(int) dbgfR3StackReadCallback(PDBGFUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst);
131
132
/**
 * Unwind context.
 *
 * @note Using a constructor and destructor here for simple+safe cleanup.
 *
 * @todo Generalize and move to IPRT or some such place.
 */
typedef struct DBGFUNWINDCTX
{
    /** The user mode VM handle. */
    PUVM        m_pUVM;
    /** The CPU being unwound. */
    VMCPUID     m_idCpu;
    /** The address space to resolve modules/symbols in (retained). */
    RTDBGAS     m_hAs;

    /** The current unwinder register state. */
    DBGFUNWINDSTATE m_State;

    /** Cached debug module for the last PC hit (retained), NIL_RTDBGMOD if none. */
    RTDBGMOD    m_hCached;
    /** Mapping address of the cached module/segment. */
    RTUINTPTR   m_uCachedMapping;
    /** Size of the cached mapping (image or segment size). */
    RTUINTPTR   m_cbCachedMapping;
    /** Heap copy of the unwind table (owned), NULL if none. */
    uint8_t    *m_pbCachedInfo;
    /** Size of m_pbCachedInfo. */
    size_t      m_cbCachedInfo;

    /** Function table for PE/AMD64 (entire m_pbCachedInfo) . */
    PCIMAGE_RUNTIME_FUNCTION_ENTRY m_paFunctions;
    /** Number of functions in m_paFunctions. */
    size_t      m_cFunctions;

    /**
     * Constructor: seeds the unwind state from the given CPU context (if any)
     * and retains the address space handle.
     *
     * @param   pUVM        The user mode VM handle.
     * @param   idCpu       The CPU to unwind.
     * @param   pInitialCtx Optional initial CPU context to load registers from.
     * @param   hAs         Address space (resolved + retained via DBGFR3AsResolveAndRetain).
     */
    DBGFUNWINDCTX(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pInitialCtx, RTDBGAS hAs)
    {
        m_State.u32Magic     = DBGFUNWINDSTATE_MAGIC;
        m_State.enmArch      = RTLDRARCH_AMD64;
        m_State.pfnReadStack = dbgfR3StackReadCallback;
        m_State.pvUser       = this;
        RT_ZERO(m_State.u);
        if (pInitialCtx)
        {
            m_State.u.x86.auRegs[X86_GREG_xAX] = pInitialCtx->rax;
            m_State.u.x86.auRegs[X86_GREG_xCX] = pInitialCtx->rcx;
            m_State.u.x86.auRegs[X86_GREG_xDX] = pInitialCtx->rdx;
            m_State.u.x86.auRegs[X86_GREG_xBX] = pInitialCtx->rbx;
            m_State.u.x86.auRegs[X86_GREG_xSP] = pInitialCtx->rsp;
            m_State.u.x86.auRegs[X86_GREG_xBP] = pInitialCtx->rbp;
            m_State.u.x86.auRegs[X86_GREG_xSI] = pInitialCtx->rsi;
            m_State.u.x86.auRegs[X86_GREG_xDI] = pInitialCtx->rdi;
            m_State.u.x86.auRegs[X86_GREG_x8 ] = pInitialCtx->r8;
            m_State.u.x86.auRegs[X86_GREG_x9 ] = pInitialCtx->r9;
            m_State.u.x86.auRegs[X86_GREG_x10] = pInitialCtx->r10;
            m_State.u.x86.auRegs[X86_GREG_x11] = pInitialCtx->r11;
            m_State.u.x86.auRegs[X86_GREG_x12] = pInitialCtx->r12;
            m_State.u.x86.auRegs[X86_GREG_x13] = pInitialCtx->r13;
            m_State.u.x86.auRegs[X86_GREG_x14] = pInitialCtx->r14;
            m_State.u.x86.auRegs[X86_GREG_x15] = pInitialCtx->r15;
            m_State.uPc                        = pInitialCtx->rip;
            m_State.u.x86.uRFlags              = pInitialCtx->rflags.u;
            m_State.u.x86.auSegs[X86_SREG_ES]  = pInitialCtx->es.Sel;
            m_State.u.x86.auSegs[X86_SREG_CS]  = pInitialCtx->cs.Sel;
            m_State.u.x86.auSegs[X86_SREG_SS]  = pInitialCtx->ss.Sel;
            m_State.u.x86.auSegs[X86_SREG_DS]  = pInitialCtx->ds.Sel;
            m_State.u.x86.auSegs[X86_SREG_GS]  = pInitialCtx->gs.Sel;
            m_State.u.x86.auSegs[X86_SREG_FS]  = pInitialCtx->fs.Sel;
            m_State.u.x86.fRealOrV86           = CPUMIsGuestInRealOrV86ModeEx(pInitialCtx);
        }

        m_pUVM  = pUVM;
        m_idCpu = idCpu;
        m_hAs   = DBGFR3AsResolveAndRetain(pUVM, hAs);

        /* Cache starts empty; filled lazily by dbgfR3UnwindCtxDoOneFrame. */
        m_hCached         = NIL_RTDBGMOD;
        m_uCachedMapping  = 0;
        m_cbCachedMapping = 0;
        m_pbCachedInfo    = NULL;
        m_cbCachedInfo    = 0;
        m_paFunctions     = NULL;
        m_cFunctions      = 0;
    }

    ~DBGFUNWINDCTX();

} DBGFUNWINDCTX;
/** Pointer to unwind context. */
typedef DBGFUNWINDCTX *PDBGFUNWINDCTX;
213
214
215static void dbgfR3UnwindCtxFlushCache(PDBGFUNWINDCTX pUnwindCtx)
216{
217 if (pUnwindCtx->m_hCached != NIL_RTDBGMOD)
218 {
219 RTDbgModRelease(pUnwindCtx->m_hCached);
220 pUnwindCtx->m_hCached = NIL_RTDBGMOD;
221 }
222 if (pUnwindCtx->m_pbCachedInfo)
223 {
224 RTMemFree(pUnwindCtx->m_pbCachedInfo);
225 pUnwindCtx->m_pbCachedInfo = NULL;
226 }
227 pUnwindCtx->m_cbCachedInfo = 0;
228 pUnwindCtx->m_paFunctions = NULL;
229 pUnwindCtx->m_cFunctions = 0;
230}
231
232
/**
 * Destructor: flushes the module/unwind-info cache and releases the address
 * space handle retained by the constructor.
 */
DBGFUNWINDCTX::~DBGFUNWINDCTX()
{
    dbgfR3UnwindCtxFlushCache(this);
    if (m_hAs != NIL_RTDBGAS)
    {
        RTDbgAsRelease(m_hAs);
        m_hAs = NIL_RTDBGAS;
    }
}
242
243
244/**
245 * @interface_method_impl{DBGFUNWINDSTATE,pfnReadStack}
246 */
247static DECLCALLBACK(int) dbgfR3StackReadCallback(PDBGFUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst)
248{
249 Assert( pThis->enmArch == RTLDRARCH_AMD64
250 || pThis->enmArch == RTLDRARCH_X86_32);
251
252 PDBGFUNWINDCTX pUnwindCtx = (PDBGFUNWINDCTX)pThis->pvUser;
253 DBGFADDRESS SrcAddr;
254 int rc = VINF_SUCCESS;
255 if ( pThis->enmArch == RTLDRARCH_X86_32
256 || pThis->enmArch == RTLDRARCH_X86_16)
257 {
258 if (!pThis->u.x86.fRealOrV86)
259 rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pThis->u.x86.auSegs[X86_SREG_SS], uSp);
260 else
261 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp + ((uint32_t)pThis->u.x86.auSegs[X86_SREG_SS] << 4));
262 }
263 else
264 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
265 if (RT_SUCCESS(rc))
266 rc = DBGFR3MemRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pvDst, cbToRead);
267 return rc;
268}
269
270
271/**
272 * Sets PC and SP.
273 *
274 * @returns true.
275 * @param pUnwindCtx The unwind context.
276 * @param pAddrPC The program counter (PC) value to set.
277 * @param pAddrStack The stack pointer (SP) value to set.
278 */
279static bool dbgfR3UnwindCtxSetPcAndSp(PDBGFUNWINDCTX pUnwindCtx, PCDBGFADDRESS pAddrPC, PCDBGFADDRESS pAddrStack)
280{
281 Assert( pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
282 || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32);
283
284 if (!DBGFADDRESS_IS_FAR(pAddrPC))
285 pUnwindCtx->m_State.uPc = pAddrPC->FlatPtr;
286 else
287 {
288 pUnwindCtx->m_State.uPc = pAddrPC->off;
289 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS] = pAddrPC->Sel;
290 }
291 if (!DBGFADDRESS_IS_FAR(pAddrStack))
292 pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->FlatPtr;
293 else
294 {
295 pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->off;
296 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] = pAddrStack->Sel;
297 }
298 return true;
299}
300
301
/**
 * Try read a 16-bit value off the stack.
 *
 * @returns pfnReadStack result.
 * @param   pThis       The unwind state (supplies the stack read callback).
 * @param   uSrcAddr    The stack address.
 * @param   puDst       The read destination.
 */
DECLINLINE(int) dbgUnwindLoadStackU16(PDBGFUNWINDSTATE pThis, uint64_t uSrcAddr, uint16_t *puDst)
{
    return pThis->pfnReadStack(pThis, uSrcAddr, sizeof(*puDst), puDst);
}
314
315
/**
 * Try read a 64-bit value off the stack.
 *
 * @returns pfnReadStack result.
 * @param   pThis       The unwind state (supplies the stack read callback).
 * @param   uSrcAddr    The stack address.
 * @param   puDst       The read destination.
 */
DECLINLINE(int) dbgUnwindLoadStackU64(PDBGFUNWINDSTATE pThis, uint64_t uSrcAddr, uint64_t *puDst)
{
    return pThis->pfnReadStack(pThis, uSrcAddr, sizeof(*puDst), puDst);
}
328
329
/**
 * Binary searches the lookup table.
 *
 * @returns Pointer to the runtime function entry covering @a uRva on success,
 *          NULL if no entry covers it.
 * @param   paFunctions The table to lookup @a uRva in (sorted by BeginAddress).
 * @param   iEnd        Size of the table.
 * @param   uRva        The RVA of the function we want.
 */
DECLINLINE(PCIMAGE_RUNTIME_FUNCTION_ENTRY)
dbgfR3UnwindCtxLookupUnwindInfoRva(PCIMAGE_RUNTIME_FUNCTION_ENTRY paFunctions, size_t iEnd, uint32_t uRva)
{
    size_t iBegin = 0;
    while (iBegin < iEnd)
    {
        size_t const i = iBegin + (iEnd - iBegin) / 2; /* overflow-safe midpoint */
        PCIMAGE_RUNTIME_FUNCTION_ENTRY pEntry = &paFunctions[i];
        if (uRva < pEntry->BeginAddress)
            iEnd = i;
        else if (uRva > pEntry->EndAddress)
            iBegin = i + 1;
        else
            return pEntry;
    }
    return NULL;
}
355
356
/**
 * Processes an IRET frame.
 *
 * Pops (optional error code,) RIP, CS, EFLAGS, RSP and SS off the stack in
 * that order and loads them into the unwind state, marking them as loaded.
 *
 * @returns true.
 * @param   pThis       The unwind state being worked.
 * @param   fErrCd      Non-zero if there is an error code on the stack.
 */
static bool dbgUnwindPeAmd64DoOneIRet(PDBGFUNWINDSTATE pThis, uint8_t fErrCd)
{
    Assert(fErrCd <= 1);
    if (!fErrCd)
        pThis->u.x86.Loaded.s.fErrCd = 0;
    else
    {
        /* The error code sits below the IRET frame proper. */
        pThis->u.x86.uErrCd = 0;
        pThis->u.x86.Loaded.s.fErrCd = 1;
        dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.uErrCd);
        pThis->u.x86.auRegs[X86_GREG_xSP] += 8;
    }

    pThis->enmRetType = DBGFRETURNTYPE_IRET64;
    pThis->u.x86.FrameAddr.off = pThis->u.x86.auRegs[X86_GREG_xSP] - /* pretend rbp is pushed on the stack */ 8;
    pThis->u.x86.FrameAddr.sel = pThis->u.x86.auSegs[X86_SREG_SS];

    dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->uPc);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* RIP */

    dbgUnwindLoadStackU16(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.auSegs[X86_SREG_CS]);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* CS */

    dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.uRFlags);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* EFLAGS */

    /* Default is a 16-byte-aligned guess near the current RSP; the stack read
       overwrites it on success (presumably left as the guess on read failure
       — depends on DBGFR3MemRead's partial-write behavior, not visible here). */
    uint64_t uNewRsp = (pThis->u.x86.auRegs[X86_GREG_xSP] - 8) & ~(uint64_t)15;
    dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &uNewRsp);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* RSP */

    dbgUnwindLoadStackU16(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.auSegs[X86_SREG_SS]);
    pThis->u.x86.auRegs[X86_GREG_xSP] += 8; /* SS */

    pThis->u.x86.auRegs[X86_GREG_xSP] = uNewRsp;

    pThis->u.x86.Loaded.s.fRegs     |= RT_BIT(X86_GREG_xSP);
    pThis->u.x86.Loaded.s.fSegs     |= RT_BIT(X86_SREG_CS) | RT_BIT(X86_SREG_SS);
    pThis->u.x86.Loaded.s.fPc        = 1;
    pThis->u.x86.Loaded.s.fFrameAddr = 1;
    pThis->u.x86.Loaded.s.fRFlags    = 1;
    return true;
}
406
407
/**
 * Unwinds one frame using cached module info.
 *
 * Interprets Windows x64 UNWIND_INFO/UNWIND_CODE data (versions 1 and 2,
 * including chained info and version 2 epilog opcodes) to undo the effect of
 * the function prolog on the register state, then performs the final RET.
 *
 * @returns true on success, false on failure.
 * @param   hMod        The debug module to retrieve unwind info from.
 * @param   paFunctions The table to lookup @a uRvaRip in.
 * @param   cFunctions  Size of the lookup table.
 * @param   pThis       The unwind register state to update.
 * @param   uRvaRip     The RVA of the RIP.
 *
 * @todo Move this down to IPRT in the ldrPE.cpp / dbgmodcodeview.cpp area.
 */
static bool dbgUnwindPeAmd64DoOne(RTDBGMOD hMod, PCIMAGE_RUNTIME_FUNCTION_ENTRY paFunctions, size_t cFunctions,
                                  PDBGFUNWINDSTATE pThis, uint32_t uRvaRip)
{
    /*
     * Lookup the unwind info RVA and try read it.
     */
    PCIMAGE_RUNTIME_FUNCTION_ENTRY pEntry = dbgfR3UnwindCtxLookupUnwindInfoRva(paFunctions, cFunctions, uRvaRip);
    if (pEntry)
    {
        IMAGE_RUNTIME_FUNCTION_ENTRY ChainedEntry;
        /* NOTE(review): iFrameReg/offFrameReg are recorded (SET_FPREG, header
           FrameRegister) but never consumed below — presumably groundwork for
           frame-register based unwinding; confirm before relying on them. */
        unsigned iFrameReg   = ~0U;
        unsigned offFrameReg = 0;

        int     fInEpilog = -1; /* -1: not-determined-assume-false; 0: false; 1: true. */
        uint8_t cbEpilog  = 0;
        uint8_t offEpilog = UINT8_MAX;
        for (unsigned cChainLoops = 0; ; cChainLoops++)
        {
            /*
             * Get the info.  The buffer is sized for the worst case: full
             * opcode array (max 256 codes) plus a trailing chained entry.
             */
            union
            {
                uint32_t uRva;
                uint8_t  ab[  RT_OFFSETOF(IMAGE_UNWIND_INFO, aOpcodes)
                            + sizeof(IMAGE_UNWIND_CODE) * 256
                            + sizeof(IMAGE_RUNTIME_FUNCTION_ENTRY)];
            } uBuf;

            uBuf.uRva = pEntry->UnwindInfoAddress;
            size_t cbBuf = sizeof(uBuf);
            int rc = RTDbgModImageQueryProp(hMod, RTLDRPROP_UNWIND_INFO, &uBuf, cbBuf, &cbBuf);
            if (RT_FAILURE(rc))
                return false;

            /*
             * Check the info.
             */
            ASMCompilerBarrier(); /* we're aliasing */
            PCIMAGE_UNWIND_INFO pInfo = (PCIMAGE_UNWIND_INFO)&uBuf;

            if (pInfo->Version != 1 && pInfo->Version != 2)
                return false;

            /*
             * Execute the opcodes.
             */
            unsigned const cOpcodes = pInfo->CountOfCodes;
            unsigned       iOpcode  = 0;

            /*
             * Check for epilog opcodes at the start and see if we're in an epilog.
             */
            if (   pInfo->Version >= 2
                && iOpcode < cOpcodes
                && pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_EPILOG)
            {
                if (fInEpilog == -1)
                {
                    /* First epilog opcode: CodeOffset is the epilog size. */
                    cbEpilog = pInfo->aOpcodes[iOpcode].u.CodeOffset;
                    Assert(cbEpilog > 0);

                    uint32_t uRvaEpilog = pEntry->EndAddress - cbEpilog;
                    iOpcode++;
                    /* OpInfo bit 0 set means this epilog is at the very end of the function. */
                    if (   (pInfo->aOpcodes[iOpcode - 1].u.OpInfo & 1)
                        && uRvaRip >= uRvaEpilog)
                    {
                        offEpilog = uRvaRip - uRvaEpilog;
                        fInEpilog = 1;
                    }
                    else
                    {
                        /* Additional epilogs are given as 12-bit offsets from EndAddress. */
                        fInEpilog = 0;
                        while (iOpcode < cOpcodes && pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_EPILOG)
                        {
                            uRvaEpilog = pEntry->EndAddress
                                       - (pInfo->aOpcodes[iOpcode].u.CodeOffset + (pInfo->aOpcodes[iOpcode].u.OpInfo << 8));
                            iOpcode++;
                            if (uRvaRip - uRvaEpilog < cbEpilog)
                            {
                                offEpilog = uRvaRip - uRvaEpilog;
                                fInEpilog = 1;
                                break;
                            }
                        }
                    }
                }
                /* Skip any remaining epilog opcodes. */
                while (iOpcode < cOpcodes && pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_EPILOG)
                    iOpcode++;
            }
            if (fInEpilog != 1)
            {
                /*
                 * Skip opcodes that doesn't apply to us if we're in the prolog.
                 */
                uint32_t offPc = uRvaRip - pEntry->BeginAddress;
                if (offPc < pInfo->SizeOfProlog)
                    while (iOpcode < cOpcodes && pInfo->aOpcodes[iOpcode].u.CodeOffset > offPc)
                        iOpcode++;

                /*
                 * Execute the opcodes.
                 */
                if (pInfo->FrameRegister != 0)
                {
                    iFrameReg   = pInfo->FrameRegister;
                    offFrameReg = pInfo->FrameOffset * 16;
                }
                while (iOpcode < cOpcodes)
                {
                    Assert(pInfo->aOpcodes[iOpcode].u.CodeOffset <= offPc);
                    uint8_t const uOpInfo   = pInfo->aOpcodes[iOpcode].u.OpInfo;
                    uint8_t const uUnwindOp = pInfo->aOpcodes[iOpcode].u.UnwindOp;
                    switch (uUnwindOp)
                    {
                        case IMAGE_AMD64_UWOP_PUSH_NONVOL:
                            /* NOTE(review): RSP is advanced *before* the load here,
                               so the value is read from the slot above the push;
                               the IRET handler loads first and then advances —
                               verify this ordering against the reference
                               implementation. */
                            pThis->u.x86.auRegs[X86_GREG_xSP] += 8;
                            dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.auRegs[uOpInfo]);
                            pThis->u.x86.Loaded.s.fRegs |= RT_BIT(uOpInfo);
                            iOpcode++;
                            break;

                        case IMAGE_AMD64_UWOP_ALLOC_LARGE:
                            if (uOpInfo == 0)
                            {
                                /* One extra slot: allocation size / 8. */
                                iOpcode += 2;
                                AssertBreak(iOpcode <= cOpcodes);
                                pThis->u.x86.auRegs[X86_GREG_xSP] += pInfo->aOpcodes[iOpcode - 1].FrameOffset * 8;
                            }
                            else
                            {
                                /* Two extra slots: unscaled 32-bit allocation size. */
                                iOpcode += 3;
                                AssertBreak(iOpcode <= cOpcodes);
                                pThis->u.x86.auRegs[X86_GREG_xSP] += RT_MAKE_U32(pInfo->aOpcodes[iOpcode - 2].FrameOffset,
                                                                                 pInfo->aOpcodes[iOpcode - 1].FrameOffset);
                            }
                            break;

                        case IMAGE_AMD64_UWOP_ALLOC_SMALL:
                            AssertBreak(iOpcode <= cOpcodes);
                            pThis->u.x86.auRegs[X86_GREG_xSP] += uOpInfo * 8 + 8;
                            iOpcode++;
                            break;

                        case IMAGE_AMD64_UWOP_SET_FPREG:
                            iFrameReg   = uOpInfo;
                            offFrameReg = pInfo->FrameOffset * 16;
                            iOpcode++;
                            break;

                        case IMAGE_AMD64_UWOP_SAVE_NONVOL:
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL_FAR:
                        {
                            /* Restore a register saved with MOV rather than PUSH. */
                            uint32_t off = 0;
                            iOpcode++;
                            if (iOpcode < cOpcodes)
                            {
                                off = pInfo->aOpcodes[iOpcode].FrameOffset;
                                iOpcode++;
                                if (uUnwindOp == IMAGE_AMD64_UWOP_SAVE_NONVOL_FAR && iOpcode < cOpcodes)
                                {
                                    off |= (uint32_t)pInfo->aOpcodes[iOpcode].FrameOffset << 16;
                                    iOpcode++;
                                }
                            }
                            off *= 8;
                            dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP] + off, &pThis->u.x86.auRegs[uOpInfo]);
                            pThis->u.x86.Loaded.s.fRegs |= RT_BIT(uOpInfo);
                            break;
                        }

                        case IMAGE_AMD64_UWOP_SAVE_XMM128:
                            /* XMM registers are not tracked by the unwind state; skip. */
                            iOpcode += 2;
                            break;

                        case IMAGE_AMD64_UWOP_SAVE_XMM128_FAR:
                            iOpcode += 3;
                            break;

                        case IMAGE_AMD64_UWOP_PUSH_MACHFRAME:
                            return dbgUnwindPeAmd64DoOneIRet(pThis, uOpInfo);

                        case IMAGE_AMD64_UWOP_EPILOG:
                            iOpcode += 1;
                            break;

                        case IMAGE_AMD64_UWOP_RESERVED_7:
                            AssertFailedReturn(false);

                        default:
                            AssertMsgFailedReturn(("%u\n", uUnwindOp), false);
                    }
                }
            }
            else
            {
                /*
                 * We're in the POP sequence of an epilog. The POP sequence should
                 * mirror the PUSH sequence exactly.
                 *
                 * Note! We should only end up here for the initial frame (just consider
                 * RSP, stack allocations, non-volatile register restores, ++).
                 */
                while (iOpcode < cOpcodes)
                {
                    uint8_t const uOpInfo   = pInfo->aOpcodes[iOpcode].u.OpInfo;
                    uint8_t const uUnwindOp = pInfo->aOpcodes[iOpcode].u.UnwindOp;
                    switch (uUnwindOp)
                    {
                        case IMAGE_AMD64_UWOP_PUSH_NONVOL:
                            pThis->u.x86.auRegs[X86_GREG_xSP] += 8;
                            if (offEpilog == 0)
                            {
                                /* The POP hasn't executed yet; the saved value is still on the stack. */
                                dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->u.x86.auRegs[uOpInfo]);
                                pThis->u.x86.Loaded.s.fRegs |= RT_BIT(uOpInfo);
                            }
                            else
                            {
                                /* Decrement offEpilog by estimated POP instruction length. */
                                offEpilog -= 1;
                                if (offEpilog > 0 && uOpInfo >= 8)
                                    offEpilog -= 1; /* REX prefix for r8..r15 */
                            }
                            iOpcode++;
                            break;

                        case IMAGE_AMD64_UWOP_PUSH_MACHFRAME: /* Must terminate an epilog, so always execute this. */
                            return dbgUnwindPeAmd64DoOneIRet(pThis, uOpInfo);

                        case IMAGE_AMD64_UWOP_ALLOC_SMALL:
                        case IMAGE_AMD64_UWOP_SET_FPREG:
                        case IMAGE_AMD64_UWOP_EPILOG:
                            iOpcode++;
                            break;
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL:
                        case IMAGE_AMD64_UWOP_SAVE_XMM128:
                            iOpcode += 2;
                            break;
                        case IMAGE_AMD64_UWOP_ALLOC_LARGE:
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL_FAR:
                        case IMAGE_AMD64_UWOP_SAVE_XMM128_FAR:
                            iOpcode += 3;
                            break;

                        default:
                            AssertMsgFailedReturn(("%u\n", uUnwindOp), false);
                    }
                }
            }

            /*
             * Chained stuff?  The chained entry follows the (even-padded) opcode array.
             */
            if (!(pInfo->Flags & IMAGE_UNW_FLAGS_CHAININFO))
                break;
            ChainedEntry = *(PCIMAGE_RUNTIME_FUNCTION_ENTRY)&pInfo->aOpcodes[(cOpcodes + 1) & ~1];
            pEntry = &ChainedEntry;
            AssertReturn(cChainLoops < 32, false); /* paranoia: guard against cyclic chains */
        }

        /*
         * RSP should now give us the return address, so perform a RET.
         */
        pThis->enmRetType = DBGFRETURNTYPE_NEAR64;

        pThis->u.x86.FrameAddr.off = pThis->u.x86.auRegs[X86_GREG_xSP] - /* pretend rbp is pushed on the stack */ 8;
        pThis->u.x86.FrameAddr.sel = pThis->u.x86.auSegs[X86_SREG_SS];
        pThis->u.x86.Loaded.s.fFrameAddr = 1;

        dbgUnwindLoadStackU64(pThis, pThis->u.x86.auRegs[X86_GREG_xSP], &pThis->uPc);
        pThis->u.x86.auRegs[X86_GREG_xSP] += 8;
        pThis->u.x86.Loaded.s.fPc = 1;
        return true;
    }

    return false;
}
696
697
/**
 * Tries to unwind one frame using unwind info.
 *
 * On a cache miss the module containing the current PC is looked up and its
 * unwind table (PE/AMD64 only for now) is cached on the context.
 *
 * @returns true on success, false on failure.
 * @param   pUnwindCtx  The unwind context.
 */
static bool dbgfR3UnwindCtxDoOneFrame(PDBGFUNWINDCTX pUnwindCtx)
{
    /*
     * Hope for the same module as last time around.
     */
    RTUINTPTR offCache = pUnwindCtx->m_State.uPc - pUnwindCtx->m_uCachedMapping;
    if (offCache < pUnwindCtx->m_cbCachedMapping)
        return dbgUnwindPeAmd64DoOne(pUnwindCtx->m_hCached, pUnwindCtx->m_paFunctions, pUnwindCtx->m_cFunctions,
                                     &pUnwindCtx->m_State, offCache);

    /*
     * Try locate the module.
     */
    RTDBGMOD    hDbgMod = NIL_RTDBGMOD;
    RTUINTPTR   uBase   = 0;
    RTDBGSEGIDX idxSeg  = NIL_RTDBGSEGIDX;
    int rc = RTDbgAsModuleByAddr(pUnwindCtx->m_hAs, pUnwindCtx->m_State.uPc, &hDbgMod, &uBase, &idxSeg);
    if (RT_SUCCESS(rc))
    {
        /* We cache the module regardless of unwind info. */
        dbgfR3UnwindCtxFlushCache(pUnwindCtx);
        pUnwindCtx->m_hCached         = hDbgMod;
        pUnwindCtx->m_uCachedMapping  = uBase;
        pUnwindCtx->m_cbCachedMapping = idxSeg == NIL_RTDBGSEGIDX ? RTDbgModImageSize(hDbgMod)
                                      : RTDbgModSegmentSize(hDbgMod, idxSeg);

        /* Play simple for now. */
        if (   idxSeg == NIL_RTDBGSEGIDX
            && RTDbgModImageGetFormat(hDbgMod) == RTLDRFMT_PE
            && RTDbgModImageGetArch(hDbgMod)   == RTLDRARCH_AMD64)
        {
            /*
             * Try query the unwind data.  First probe for the required size
             * (expecting VERR_BUFFER_OVERFLOW), then fetch it for real.
             */
            uint32_t uDummy;
            size_t cbNeeded = 0;
            rc = RTDbgModImageQueryProp(hDbgMod, RTLDRPROP_UNWIND_TABLE, &uDummy, 0, &cbNeeded);
            if (   rc == VERR_BUFFER_OVERFLOW
                && cbNeeded >= sizeof(*pUnwindCtx->m_paFunctions)
                && cbNeeded < _64M) /* sanity cap on table size */
            {
                void *pvBuf = RTMemAllocZ(cbNeeded + 32);
                if (pvBuf)
                {
                    rc = RTDbgModImageQueryProp(hDbgMod, RTLDRPROP_UNWIND_TABLE, pvBuf, cbNeeded + 32, &cbNeeded);
                    if (RT_SUCCESS(rc))
                    {
                        /* Ownership of pvBuf passes to the cache (freed by dbgfR3UnwindCtxFlushCache). */
                        pUnwindCtx->m_pbCachedInfo = (uint8_t *)pvBuf;
                        pUnwindCtx->m_cbCachedInfo = cbNeeded;
                        pUnwindCtx->m_paFunctions  = (PCIMAGE_RUNTIME_FUNCTION_ENTRY)pvBuf;
                        pUnwindCtx->m_cFunctions   = cbNeeded / sizeof(*pUnwindCtx->m_paFunctions);

                        return dbgUnwindPeAmd64DoOne(pUnwindCtx->m_hCached, pUnwindCtx->m_paFunctions, pUnwindCtx->m_cFunctions,
                                                     &pUnwindCtx->m_State, pUnwindCtx->m_State.uPc - pUnwindCtx->m_uCachedMapping);
                    }
                    RTMemFree(pvBuf);
                }
            }
        }
    }
    return false;
}
766
767
/**
 * Read stack memory, will init entire buffer.
 *
 * On a bulk read failure this falls back to byte-by-byte reading, zero-fills
 * whatever could not be read, and still returns success if at least one byte
 * was read (best effort by design).
 *
 * @returns VBox status code (success if any bytes were read).
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The CPU to read in the context of.
 * @param   pvBuf       Destination buffer (always fully initialized on return).
 * @param   pSrcAddr    The address to start reading at.
 * @param   cb          Number of bytes to read.
 * @param   pcbRead     Where to return the number of bytes actually read.
 */
DECLINLINE(int) dbgfR3StackRead(PUVM pUVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pSrcAddr, size_t cb, size_t *pcbRead)
{
    int rc = DBGFR3MemRead(pUVM, idCpu, pSrcAddr, pvBuf, cb);
    if (RT_FAILURE(rc))
    {
        /* fallback: byte by byte and zero the ones we fail to read. */
        size_t cbRead;
        for (cbRead = 0; cbRead < cb; cbRead++)
        {
            DBGFADDRESS Addr = *pSrcAddr;
            rc = DBGFR3MemRead(pUVM, idCpu, DBGFR3AddrAdd(&Addr, cbRead), (uint8_t *)pvBuf + cbRead, 1);
            if (RT_FAILURE(rc))
                break;
        }
        if (cbRead)
            rc = VINF_SUCCESS;
        memset((char *)pvBuf + cbRead, 0, cb - cbRead);
        *pcbRead = cbRead;
    }
    else
        *pcbRead = cb;
    return rc;
}
794
795static int dbgfR3StackWalkCollectRegisterChanges(PUVM pUVM, PDBGFSTACKFRAME pFrame, PDBGFUNWINDSTATE pState)
796{
797 pFrame->cSureRegs = 0;
798 pFrame->paSureRegs = NULL;
799
800 if ( pState->enmArch == RTLDRARCH_AMD64
801 || pState->enmArch == RTLDRARCH_X86_32
802 || pState->enmArch == RTLDRARCH_X86_16)
803 {
804 if (pState->u.x86.Loaded.fAll)
805 {
806 /*
807 * Count relevant registers.
808 */
809 uint32_t cRegs = 0;
810 if (pState->u.x86.Loaded.s.fRegs)
811 for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auRegs)); f <<= 1)
812 if (pState->u.x86.Loaded.s.fRegs & f)
813 cRegs++;
814 if (pState->u.x86.Loaded.s.fSegs)
815 for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auSegs)); f <<= 1)
816 if (pState->u.x86.Loaded.s.fSegs & f)
817 cRegs++;
818 if (pState->u.x86.Loaded.s.fRFlags)
819 cRegs++;
820 if (pState->u.x86.Loaded.s.fErrCd)
821 cRegs++;
822 if (cRegs > 0)
823 {
824 /*
825 * Allocate the arrays.
826 */
827 PDBGFREGVALEX paSureRegs = (PDBGFREGVALEX)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(DBGFREGVALEX) * cRegs);
828 AssertReturn(paSureRegs, VERR_NO_MEMORY);
829 pFrame->paSureRegs = paSureRegs;
830 pFrame->cSureRegs = cRegs;
831
832 /*
833 * Popuplate the arrays.
834 */
835 uint32_t iReg = 0;
836 if (pState->u.x86.Loaded.s.fRegs)
837 for (uint32_t i = 1; i < RT_ELEMENTS(pState->u.x86.auRegs); i++)
838 if (pState->u.x86.Loaded.s.fRegs & RT_BIT(i))
839 {
840 paSureRegs[iReg].Value.u64 = pState->u.x86.auRegs[i];
841 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
842 paSureRegs[iReg].enmReg = (DBGFREG)(DBGFREG_RAX + i);
843 iReg++;
844 }
845
846 if (pState->u.x86.Loaded.s.fSegs)
847 for (uint32_t i = 1; i < RT_ELEMENTS(pState->u.x86.auSegs); i++)
848 if (pState->u.x86.Loaded.s.fSegs & RT_BIT(i))
849 {
850 paSureRegs[iReg].Value.u16 = pState->u.x86.auSegs[i];
851 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U16;
852 switch (i)
853 {
854 case X86_SREG_ES: paSureRegs[iReg].enmReg = DBGFREG_ES; break;
855 case X86_SREG_CS: paSureRegs[iReg].enmReg = DBGFREG_CS; break;
856 case X86_SREG_SS: paSureRegs[iReg].enmReg = DBGFREG_SS; break;
857 case X86_SREG_DS: paSureRegs[iReg].enmReg = DBGFREG_DS; break;
858 case X86_SREG_FS: paSureRegs[iReg].enmReg = DBGFREG_FS; break;
859 case X86_SREG_GS: paSureRegs[iReg].enmReg = DBGFREG_GS; break;
860 default: AssertFailedBreak();
861 }
862 iReg++;
863 }
864
865 if (iReg < cRegs)
866 {
867 if (pState->u.x86.Loaded.s.fRFlags)
868 {
869 paSureRegs[iReg].Value.u64 = pState->u.x86.uRFlags;
870 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
871 paSureRegs[iReg].enmReg = DBGFREG_RFLAGS;
872 iReg++;
873 }
874 if (pState->u.x86.Loaded.s.fErrCd)
875 {
876 paSureRegs[iReg].Value.u64 = pState->u.x86.uErrCd;
877 paSureRegs[iReg].enmType = DBGFREGVALTYPE_U64;
878 paSureRegs[iReg].enmReg = DBGFREG_END;
879 paSureRegs[iReg].pszName = "trap-errcd";
880 iReg++;
881 }
882 }
883 Assert(iReg == cRegs);
884 }
885 }
886 }
887
888 return VINF_SUCCESS;
889}
890
891
892/**
893 * Internal worker routine.
894 *
895 * On x86 the typical stack frame layout is like this:
896 * .. ..
897 * 16 parameter 2
898 * 12 parameter 1
899 * 8 parameter 0
900 * 4 return address
901 * 0 old ebp; current ebp points here
902 */
903DECL_NO_INLINE(static, int) dbgfR3StackWalk(PDBGFUNWINDCTX pUnwindCtx, PDBGFSTACKFRAME pFrame, bool fFirst)
904{
905 /*
906 * Stop if we got a read error in the previous run.
907 */
908 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST)
909 return VERR_NO_MORE_FILES;
910
911 /*
912 * Advance the frame (except for the first).
913 */
914 if (!fFirst) /** @todo we can probably eliminate this fFirst business... */
915 {
916 /* frame, pc and stack is taken from the existing frames return members. */
917 pFrame->AddrFrame = pFrame->AddrReturnFrame;
918 pFrame->AddrPC = pFrame->AddrReturnPC;
919 pFrame->pSymPC = pFrame->pSymReturnPC;
920 pFrame->pLinePC = pFrame->pLineReturnPC;
921
922 /* increment the frame number. */
923 pFrame->iFrame++;
924
925 /* UNWIND_INFO_RET -> USED_UNWIND; return type */
926 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET))
927 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
928 else
929 {
930 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
931 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
932 if (pFrame->enmReturnFrameReturnType != DBGFRETURNTYPE_INVALID)
933 {
934 pFrame->enmReturnType = pFrame->enmReturnFrameReturnType;
935 pFrame->enmReturnFrameReturnType = DBGFRETURNTYPE_INVALID;
936 }
937 }
938 }
939
940 /*
941 * Enagage the OS layer and collect register changes.
942 */
943 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
944 {
945 /** @todo engage the OS buggers to identify trap frames and update unwindctx accordingly. */
946 if (!fFirst)
947 {
948 int rc = dbgfR3StackWalkCollectRegisterChanges(pUnwindCtx->m_pUVM, pFrame, &pUnwindCtx->m_State);
949 if (RT_FAILURE(rc))
950 return rc;
951 }
952 }
953
954 /*
955 * Figure the return address size and use the old PC to guess stack item size.
956 */
957 /** @todo this is bogus... */
958 unsigned cbRetAddr = DBGFReturnTypeSize(pFrame->enmReturnType);
959 unsigned cbStackItem;
960 switch (pFrame->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
961 {
962 case DBGFADDRESS_FLAGS_FAR16: cbStackItem = 2; break;
963 case DBGFADDRESS_FLAGS_FAR32: cbStackItem = 4; break;
964 case DBGFADDRESS_FLAGS_FAR64: cbStackItem = 8; break;
965 case DBGFADDRESS_FLAGS_RING0: cbStackItem = sizeof(RTHCUINTPTR); break;
966 default:
967 switch (pFrame->enmReturnType)
968 {
969 case DBGFRETURNTYPE_FAR16:
970 case DBGFRETURNTYPE_IRET16:
971 case DBGFRETURNTYPE_IRET32_V86:
972 case DBGFRETURNTYPE_NEAR16: cbStackItem = 2; break;
973
974 case DBGFRETURNTYPE_FAR32:
975 case DBGFRETURNTYPE_IRET32:
976 case DBGFRETURNTYPE_IRET32_PRIV:
977 case DBGFRETURNTYPE_NEAR32: cbStackItem = 4; break;
978
979 case DBGFRETURNTYPE_FAR64:
980 case DBGFRETURNTYPE_IRET64:
981 case DBGFRETURNTYPE_NEAR64: cbStackItem = 8; break;
982
983 default:
984 AssertMsgFailed(("%d\n", pFrame->enmReturnType));
985 cbStackItem = 4;
986 break;
987 }
988 }
989
990 /*
991 * Read the raw frame data.
992 * We double cbRetAddr in case we have a far return.
993 */
994 union
995 {
996 uint64_t *pu64;
997 uint32_t *pu32;
998 uint16_t *pu16;
999 uint8_t *pb;
1000 void *pv;
1001 } u, uRet, uArgs, uBp;
1002 size_t cbRead = cbRetAddr*2 + cbStackItem + sizeof(pFrame->Args);
1003 u.pv = alloca(cbRead);
1004 uBp = u;
1005 uRet.pb = u.pb + cbStackItem;
1006 uArgs.pb = u.pb + cbStackItem + cbRetAddr;
1007
1008 Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
1009 int rc = dbgfR3StackRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, u.pv, &pFrame->AddrFrame, cbRead, &cbRead);
1010 if ( RT_FAILURE(rc)
1011 || cbRead < cbRetAddr + cbStackItem)
1012 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_LAST;
1013
1014 /*
1015 * Return Frame address.
1016 *
1017 * If we used unwind info to get here, the unwind register context will be
1018 * positioned after the return instruction has been executed. We start by
1019 * picking up the rBP register here for return frame and will try improve
1020 * on it further down by using unwind info.
1021 */
1022 pFrame->AddrReturnFrame = pFrame->AddrFrame;
1023 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
1024 {
1025 if ( pFrame->enmReturnType == DBGFRETURNTYPE_IRET32_PRIV
1026 || pFrame->enmReturnType == DBGFRETURNTYPE_IRET64)
1027 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnFrame,
1028 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
1029 else if (pFrame->enmReturnType == DBGFRETURNTYPE_IRET32_V86)
1030 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnFrame,
1031 ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
1032 + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
1033 else
1034 {
1035 pFrame->AddrReturnFrame.off = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP];
1036 pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
1037 }
1038 }
1039 else
1040 {
1041 switch (cbStackItem)
1042 {
1043 case 2: pFrame->AddrReturnFrame.off = *uBp.pu16; break;
1044 case 4: pFrame->AddrReturnFrame.off = *uBp.pu32; break;
1045 case 8: pFrame->AddrReturnFrame.off = *uBp.pu64; break;
1046 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
1047 }
1048
1049 /* Watcom tries to keep the frame pointer odd for far returns. */
1050 if ( cbStackItem <= 4
1051 && !(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
1052 {
1053 if (pFrame->AddrReturnFrame.off & 1)
1054 {
1055 pFrame->AddrReturnFrame.off &= ~(RTGCUINTPTR)1;
1056 if (pFrame->enmReturnType == DBGFRETURNTYPE_NEAR16)
1057 {
1058 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
1059 pFrame->enmReturnType = DBGFRETURNTYPE_FAR16;
1060 cbRetAddr = 4;
1061 }
1062 else if (pFrame->enmReturnType == DBGFRETURNTYPE_NEAR32)
1063 {
1064 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
1065 pFrame->enmReturnType = DBGFRETURNTYPE_FAR32;
1066 cbRetAddr = 8;
1067 }
1068 }
1069 else if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN)
1070 {
1071 if (pFrame->enmReturnType == DBGFRETURNTYPE_FAR16)
1072 {
1073 pFrame->enmReturnType = DBGFRETURNTYPE_NEAR16;
1074 cbRetAddr = 2;
1075 }
1076 else if (pFrame->enmReturnType == DBGFRETURNTYPE_NEAR32)
1077 {
1078 pFrame->enmReturnType = DBGFRETURNTYPE_FAR32;
1079 cbRetAddr = 4;
1080 }
1081 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
1082 }
1083 uArgs.pb = u.pb + cbStackItem + cbRetAddr;
1084 }
1085
1086 pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
1087 }
1088
1089 /*
1090 * Return Stack Address.
1091 */
1092 pFrame->AddrReturnStack = pFrame->AddrReturnFrame;
1093 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
1094 {
1095 if ( pFrame->enmReturnType == DBGFRETURNTYPE_IRET32_PRIV
1096 || pFrame->enmReturnType == DBGFRETURNTYPE_IRET64)
1097 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnStack,
1098 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
1099 else if (pFrame->enmReturnType == DBGFRETURNTYPE_IRET32_V86)
1100 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnStack,
1101 ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
1102 + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
1103 else
1104 {
1105 pFrame->AddrReturnStack.off = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP];
1106 pFrame->AddrReturnStack.FlatPtr += pFrame->AddrReturnStack.off - pFrame->AddrStack.off;
1107 }
1108 }
1109 else
1110 {
1111 pFrame->AddrReturnStack.off += cbStackItem + cbRetAddr;
1112 pFrame->AddrReturnStack.FlatPtr += cbStackItem + cbRetAddr;
1113 }
1114
1115 /*
1116 * Return PC.
1117 */
1118 pFrame->AddrReturnPC = pFrame->AddrPC;
1119 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
1120 {
1121 if (DBGFReturnTypeIsNear(pFrame->enmReturnType))
1122 {
1123 pFrame->AddrReturnPC.off = pUnwindCtx->m_State.uPc;
1124 pFrame->AddrReturnPC.FlatPtr += pFrame->AddrReturnPC.off - pFrame->AddrPC.off;
1125 }
1126 else
1127 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC,
1128 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS], pUnwindCtx->m_State.uPc);
1129 }
1130 else
1131 switch (pFrame->enmReturnType)
1132 {
1133 case DBGFRETURNTYPE_NEAR16:
1134 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
1135 {
1136 pFrame->AddrReturnPC.FlatPtr += *uRet.pu16 - pFrame->AddrReturnPC.off;
1137 pFrame->AddrReturnPC.off = *uRet.pu16;
1138 }
1139 else
1140 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu16);
1141 break;
1142 case DBGFRETURNTYPE_NEAR32:
1143 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
1144 {
1145 pFrame->AddrReturnPC.FlatPtr += *uRet.pu32 - pFrame->AddrReturnPC.off;
1146 pFrame->AddrReturnPC.off = *uRet.pu32;
1147 }
1148 else
1149 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu32);
1150 break;
1151 case DBGFRETURNTYPE_NEAR64:
1152 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
1153 {
1154 pFrame->AddrReturnPC.FlatPtr += *uRet.pu64 - pFrame->AddrReturnPC.off;
1155 pFrame->AddrReturnPC.off = *uRet.pu64;
1156 }
1157 else
1158 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu64);
1159 break;
1160 case DBGFRETURNTYPE_FAR16:
1161 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
1162 break;
1163 case DBGFRETURNTYPE_FAR32:
1164 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1165 break;
1166 case DBGFRETURNTYPE_FAR64:
1167 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
1168 break;
1169 case DBGFRETURNTYPE_IRET16:
1170 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
1171 break;
1172 case DBGFRETURNTYPE_IRET32:
1173 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1174 break;
1175 case DBGFRETURNTYPE_IRET32_PRIV:
1176 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1177 break;
1178 case DBGFRETURNTYPE_IRET32_V86:
1179 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
1180 break;
1181 case DBGFRETURNTYPE_IRET64:
1182 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
1183 break;
1184 default:
1185 AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
1186 return VERR_INVALID_PARAMETER;
1187 }
1188
1189
1190 pFrame->pSymReturnPC = DBGFR3AsSymbolByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
1191 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1192 NULL /*poffDisp*/, NULL /*phMod*/);
1193 pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
1194 NULL /*poffDisp*/, NULL /*phMod*/);
1195
1196 /*
1197 * Frame bitness flag.
1198 */
1199 /** @todo use previous return type for this? */
1200 pFrame->fFlags &= ~(DBGFSTACKFRAME_FLAGS_16BIT | DBGFSTACKFRAME_FLAGS_32BIT | DBGFSTACKFRAME_FLAGS_64BIT);
1201 switch (cbStackItem)
1202 {
1203 case 2: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_16BIT; break;
1204 case 4: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
1205 case 8: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
1206 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
1207 }
1208
1209 /*
1210 * The arguments.
1211 */
1212 memcpy(&pFrame->Args, uArgs.pv, sizeof(pFrame->Args));
1213
1214 /*
1215 * Try use unwind information to locate the return frame pointer (for the
1216 * next loop iteration).
1217 */
1218 Assert(!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET));
1219 pFrame->enmReturnFrameReturnType = DBGFRETURNTYPE_INVALID;
1220 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST))
1221 {
1222 /* Set PC and SP if we didn't unwind our way here (context will then point
1223 and the return PC and SP already). */
1224 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
1225 {
1226 dbgfR3UnwindCtxSetPcAndSp(pUnwindCtx, &pFrame->AddrReturnPC, &pFrame->AddrReturnStack);
1227 }
1228 /** @todo Reevaluate CS if the previous frame return type isn't near. */
1229 if ( pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
1230 || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32
1231 || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_16)
1232 pUnwindCtx->m_State.u.x86.Loaded.fAll = 0;
1233 else
1234 AssertFailed();
1235 if (dbgfR3UnwindCtxDoOneFrame(pUnwindCtx))
1236 {
1237 DBGFADDRESS AddrReturnFrame = pFrame->AddrReturnFrame;
1238 rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &AddrReturnFrame,
1239 pUnwindCtx->m_State.u.x86.FrameAddr.sel, pUnwindCtx->m_State.u.x86.FrameAddr.off);
1240 if (RT_SUCCESS(rc))
1241 pFrame->AddrReturnFrame = AddrReturnFrame;
1242 pFrame->enmReturnFrameReturnType = pUnwindCtx->m_State.enmRetType;
1243 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
1244 }
1245 }
1246
1247 return VINF_SUCCESS;
1248}
1249
1250
1251/**
1252 * Walks the entire stack allocating memory as we walk.
1253 */
1254static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pCtx, RTDBGAS hAs,
1255 DBGFCODETYPE enmCodeType,
1256 PCDBGFADDRESS pAddrFrame,
1257 PCDBGFADDRESS pAddrStack,
1258 PCDBGFADDRESS pAddrPC,
1259 DBGFRETURNTYPE enmReturnType,
1260 PCDBGFSTACKFRAME *ppFirstFrame)
1261{
1262 DBGFUNWINDCTX UnwindCtx(pUVM, idCpu, pCtx, hAs);
1263
1264 /* alloc first frame. */
1265 PDBGFSTACKFRAME pCur = (PDBGFSTACKFRAME)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pCur));
1266 if (!pCur)
1267 return VERR_NO_MEMORY;
1268
1269 /*
1270 * Initialize the frame.
1271 */
1272 pCur->pNextInternal = NULL;
1273 pCur->pFirstInternal = pCur;
1274
1275 int rc = VINF_SUCCESS;
1276 if (pAddrPC)
1277 pCur->AddrPC = *pAddrPC;
1278 else if (enmCodeType != DBGFCODETYPE_GUEST)
1279 DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->rip);
1280 else
1281 rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrPC, pCtx->cs.Sel, pCtx->rip);
1282 if (RT_SUCCESS(rc))
1283 {
1284 uint64_t fAddrMask;
1285 if (enmCodeType == DBGFCODETYPE_RING0)
1286 fAddrMask = HC_ARCH_BITS == 64 ? UINT64_MAX : UINT32_MAX;
1287 else if (enmCodeType == DBGFCODETYPE_HYPER)
1288 fAddrMask = UINT32_MAX;
1289 else if (DBGFADDRESS_IS_FAR16(&pCur->AddrPC))
1290 fAddrMask = UINT16_MAX;
1291 else if (DBGFADDRESS_IS_FAR32(&pCur->AddrPC))
1292 fAddrMask = UINT32_MAX;
1293 else if (DBGFADDRESS_IS_FAR64(&pCur->AddrPC))
1294 fAddrMask = UINT64_MAX;
1295 else
1296 {
1297 PVMCPU pVCpu = VMMGetCpuById(pUVM->pVM, idCpu);
1298 CPUMMODE enmCpuMode = CPUMGetGuestMode(pVCpu);
1299 if (enmCpuMode == CPUMMODE_REAL)
1300 {
1301 fAddrMask = UINT16_MAX;
1302 if (enmReturnType == DBGFRETURNTYPE_INVALID)
1303 pCur->enmReturnType = DBGFRETURNTYPE_NEAR16;
1304 }
1305 else if ( enmCpuMode == CPUMMODE_PROTECTED
1306 || !CPUMIsGuestIn64BitCode(pVCpu))
1307 {
1308 fAddrMask = UINT32_MAX;
1309 if (enmReturnType == DBGFRETURNTYPE_INVALID)
1310 pCur->enmReturnType = DBGFRETURNTYPE_NEAR32;
1311 }
1312 else
1313 {
1314 fAddrMask = UINT64_MAX;
1315 if (enmReturnType == DBGFRETURNTYPE_INVALID)
1316 pCur->enmReturnType = DBGFRETURNTYPE_NEAR64;
1317 }
1318 }
1319
1320 if (enmReturnType == DBGFRETURNTYPE_INVALID)
1321 switch (pCur->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
1322 {
1323 case DBGFADDRESS_FLAGS_FAR16: pCur->enmReturnType = DBGFRETURNTYPE_NEAR16; break;
1324 case DBGFADDRESS_FLAGS_FAR32: pCur->enmReturnType = DBGFRETURNTYPE_NEAR32; break;
1325 case DBGFADDRESS_FLAGS_FAR64: pCur->enmReturnType = DBGFRETURNTYPE_NEAR64; break;
1326 case DBGFADDRESS_FLAGS_RING0:
1327 pCur->enmReturnType = HC_ARCH_BITS == 64 ? DBGFRETURNTYPE_NEAR64 : DBGFRETURNTYPE_NEAR32;
1328 break;
1329 default:
1330 pCur->enmReturnType = DBGFRETURNTYPE_NEAR32;
1331 break;
1332 }
1333
1334
1335 if (pAddrStack)
1336 pCur->AddrStack = *pAddrStack;
1337 else if (enmCodeType != DBGFCODETYPE_GUEST)
1338 DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->rsp & fAddrMask);
1339 else
1340 rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrStack, pCtx->ss.Sel, pCtx->rsp & fAddrMask);
1341
1342 Assert(!(pCur->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO));
1343 if (pAddrFrame)
1344 pCur->AddrFrame = *pAddrFrame;
1345 else if ( RT_SUCCESS(rc)
1346 && dbgfR3UnwindCtxSetPcAndSp(&UnwindCtx, &pCur->AddrPC, &pCur->AddrStack)
1347 && dbgfR3UnwindCtxDoOneFrame(&UnwindCtx))
1348 {
1349 pCur->enmReturnType = UnwindCtx.m_State.enmRetType;
1350 pCur->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
1351 rc = DBGFR3AddrFromSelOff(UnwindCtx.m_pUVM, UnwindCtx.m_idCpu, &pCur->AddrFrame,
1352 UnwindCtx.m_State.u.x86.FrameAddr.sel, UnwindCtx.m_State.u.x86.FrameAddr.off);
1353 }
1354 else if (enmCodeType != DBGFCODETYPE_GUEST)
1355 DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->rbp & fAddrMask);
1356 else if (RT_SUCCESS(rc))
1357 rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrFrame, pCtx->ss.Sel, pCtx->rbp & fAddrMask);
1358
1359 /*
1360 * The first frame.
1361 */
1362 if (RT_SUCCESS(rc))
1363 {
1364 if (DBGFADDRESS_IS_VALID(&pCur->AddrPC))
1365 {
1366 pCur->pSymPC = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pCur->AddrPC,
1367 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1368 NULL /*poffDisp*/, NULL /*phMod*/);
1369 pCur->pLinePC = DBGFR3AsLineByAddrA(pUVM, hAs, &pCur->AddrPC, NULL /*poffDisp*/, NULL /*phMod*/);
1370 }
1371
1372 rc = dbgfR3StackWalk(&UnwindCtx, pCur, true /*fFirst*/);
1373 }
1374 }
1375 else
1376 pCur->enmReturnType = enmReturnType;
1377 if (RT_FAILURE(rc))
1378 {
1379 DBGFR3StackWalkEnd(pCur);
1380 return rc;
1381 }
1382
1383 /*
1384 * The other frames.
1385 */
1386 DBGFSTACKFRAME Next = *pCur;
1387 while (!(pCur->fFlags & (DBGFSTACKFRAME_FLAGS_LAST | DBGFSTACKFRAME_FLAGS_MAX_DEPTH | DBGFSTACKFRAME_FLAGS_LOOP)))
1388 {
1389 Next.cSureRegs = 0;
1390 Next.paSureRegs = NULL;
1391
1392 /* try walk. */
1393 rc = dbgfR3StackWalk(&UnwindCtx, &Next, false /*fFirst*/);
1394 if (RT_FAILURE(rc))
1395 break;
1396
1397 /* add the next frame to the chain. */
1398 PDBGFSTACKFRAME pNext = (PDBGFSTACKFRAME)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pNext));
1399 if (!pNext)
1400 {
1401 DBGFR3StackWalkEnd(pCur);
1402 return VERR_NO_MEMORY;
1403 }
1404 *pNext = Next;
1405 pCur->pNextInternal = pNext;
1406 pCur = pNext;
1407 Assert(pCur->pNextInternal == NULL);
1408
1409 /* check for loop */
1410 for (PCDBGFSTACKFRAME pLoop = pCur->pFirstInternal;
1411 pLoop && pLoop != pCur;
1412 pLoop = pLoop->pNextInternal)
1413 if (pLoop->AddrFrame.FlatPtr == pCur->AddrFrame.FlatPtr)
1414 {
1415 pCur->fFlags |= DBGFSTACKFRAME_FLAGS_LOOP;
1416 break;
1417 }
1418
1419 /* check for insane recursion */
1420 if (pCur->iFrame >= 2048)
1421 pCur->fFlags |= DBGFSTACKFRAME_FLAGS_MAX_DEPTH;
1422 }
1423
1424 *ppFirstFrame = pCur->pFirstInternal;
1425 return rc;
1426}
1427
1428
1429/**
1430 * Common worker for DBGFR3StackWalkBeginGuestEx, DBGFR3StackWalkBeginHyperEx,
1431 * DBGFR3StackWalkBeginGuest and DBGFR3StackWalkBeginHyper.
1432 */
1433static int dbgfR3StackWalkBeginCommon(PUVM pUVM,
1434 VMCPUID idCpu,
1435 DBGFCODETYPE enmCodeType,
1436 PCDBGFADDRESS pAddrFrame,
1437 PCDBGFADDRESS pAddrStack,
1438 PCDBGFADDRESS pAddrPC,
1439 DBGFRETURNTYPE enmReturnType,
1440 PCDBGFSTACKFRAME *ppFirstFrame)
1441{
1442 /*
1443 * Validate parameters.
1444 */
1445 *ppFirstFrame = NULL;
1446 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1447 PVM pVM = pUVM->pVM;
1448 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1449 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1450 if (pAddrFrame)
1451 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrFrame), VERR_INVALID_PARAMETER);
1452 if (pAddrStack)
1453 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrStack), VERR_INVALID_PARAMETER);
1454 if (pAddrPC)
1455 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrPC), VERR_INVALID_PARAMETER);
1456 AssertReturn(enmReturnType >= DBGFRETURNTYPE_INVALID && enmReturnType < DBGFRETURNTYPE_END, VERR_INVALID_PARAMETER);
1457
1458 /*
1459 * Get the CPUM context pointer and pass it on the specified EMT.
1460 */
1461 RTDBGAS hAs;
1462 PCCPUMCTX pCtx;
1463 switch (enmCodeType)
1464 {
1465 case DBGFCODETYPE_GUEST:
1466 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpuById(pVM, idCpu));
1467 hAs = DBGF_AS_GLOBAL;
1468 break;
1469 case DBGFCODETYPE_HYPER:
1470 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpuById(pVM, idCpu));
1471 hAs = DBGF_AS_RC_AND_GC_GLOBAL;
1472 break;
1473 case DBGFCODETYPE_RING0:
1474 pCtx = NULL; /* No valid context present. */
1475 hAs = DBGF_AS_R0;
1476 break;
1477 default:
1478 AssertFailedReturn(VERR_INVALID_PARAMETER);
1479 }
1480 return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3StackWalkCtxFull, 10,
1481 pUVM, idCpu, pCtx, hAs, enmCodeType,
1482 pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
1483}
1484
1485
1486/**
1487 * Begins a guest stack walk, extended version.
1488 *
1489 * This will walk the current stack, constructing a list of info frames which is
1490 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1491 * list and DBGFR3StackWalkEnd to release it.
1492 *
1493 * @returns VINF_SUCCESS on success.
1494 * @returns VERR_NO_MEMORY if we're out of memory.
1495 *
1496 * @param pUVM The user mode VM handle.
1497 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1498 * @param enmCodeType Code type
1499 * @param pAddrFrame Frame address to start at. (Optional)
1500 * @param pAddrStack Stack address to start at. (Optional)
1501 * @param pAddrPC Program counter to start at. (Optional)
1502 * @param enmReturnType The return address type. (Optional)
1503 * @param ppFirstFrame Where to return the pointer to the first info frame.
1504 */
1505VMMR3DECL(int) DBGFR3StackWalkBeginEx(PUVM pUVM,
1506 VMCPUID idCpu,
1507 DBGFCODETYPE enmCodeType,
1508 PCDBGFADDRESS pAddrFrame,
1509 PCDBGFADDRESS pAddrStack,
1510 PCDBGFADDRESS pAddrPC,
1511 DBGFRETURNTYPE enmReturnType,
1512 PCDBGFSTACKFRAME *ppFirstFrame)
1513{
1514 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
1515}
1516
1517
1518/**
1519 * Begins a guest stack walk.
1520 *
1521 * This will walk the current stack, constructing a list of info frames which is
1522 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1523 * list and DBGFR3StackWalkEnd to release it.
1524 *
1525 * @returns VINF_SUCCESS on success.
1526 * @returns VERR_NO_MEMORY if we're out of memory.
1527 *
1528 * @param pUVM The user mode VM handle.
1529 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1530 * @param enmCodeType Code type
1531 * @param ppFirstFrame Where to return the pointer to the first info frame.
1532 */
1533VMMR3DECL(int) DBGFR3StackWalkBegin(PUVM pUVM, VMCPUID idCpu, DBGFCODETYPE enmCodeType, PCDBGFSTACKFRAME *ppFirstFrame)
1534{
1535 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, NULL, NULL, NULL, DBGFRETURNTYPE_INVALID, ppFirstFrame);
1536}
1537
1538/**
1539 * Gets the next stack frame.
1540 *
1541 * @returns Pointer to the info for the next stack frame.
1542 * NULL if no more frames.
1543 *
1544 * @param pCurrent Pointer to the current stack frame.
1545 *
1546 */
1547VMMR3DECL(PCDBGFSTACKFRAME) DBGFR3StackWalkNext(PCDBGFSTACKFRAME pCurrent)
1548{
1549 return pCurrent
1550 ? pCurrent->pNextInternal
1551 : NULL;
1552}
1553
1554
1555/**
1556 * Ends a stack walk process.
1557 *
1558 * This *must* be called after a successful first call to any of the stack
1559 * walker functions. If not called we will leak memory or other resources.
1560 *
1561 * @param pFirstFrame The frame returned by one of the begin functions.
1562 */
1563VMMR3DECL(void) DBGFR3StackWalkEnd(PCDBGFSTACKFRAME pFirstFrame)
1564{
1565 if ( !pFirstFrame
1566 || !pFirstFrame->pFirstInternal)
1567 return;
1568
1569 PDBGFSTACKFRAME pFrame = (PDBGFSTACKFRAME)pFirstFrame->pFirstInternal;
1570 while (pFrame)
1571 {
1572 PDBGFSTACKFRAME pCur = pFrame;
1573 pFrame = (PDBGFSTACKFRAME)pCur->pNextInternal;
1574 if (pFrame)
1575 {
1576 if (pCur->pSymReturnPC == pFrame->pSymPC)
1577 pFrame->pSymPC = NULL;
1578 if (pCur->pSymReturnPC == pFrame->pSymReturnPC)
1579 pFrame->pSymReturnPC = NULL;
1580
1581 if (pCur->pSymPC == pFrame->pSymPC)
1582 pFrame->pSymPC = NULL;
1583 if (pCur->pSymPC == pFrame->pSymReturnPC)
1584 pFrame->pSymReturnPC = NULL;
1585
1586 if (pCur->pLineReturnPC == pFrame->pLinePC)
1587 pFrame->pLinePC = NULL;
1588 if (pCur->pLineReturnPC == pFrame->pLineReturnPC)
1589 pFrame->pLineReturnPC = NULL;
1590
1591 if (pCur->pLinePC == pFrame->pLinePC)
1592 pFrame->pLinePC = NULL;
1593 if (pCur->pLinePC == pFrame->pLineReturnPC)
1594 pFrame->pLineReturnPC = NULL;
1595 }
1596
1597 RTDbgSymbolFree(pCur->pSymPC);
1598 RTDbgSymbolFree(pCur->pSymReturnPC);
1599 RTDbgLineFree(pCur->pLinePC);
1600 RTDbgLineFree(pCur->pLineReturnPC);
1601
1602 if (pCur->paSureRegs)
1603 {
1604 MMR3HeapFree(pCur->paSureRegs);
1605 pCur->paSureRegs = NULL;
1606 pCur->cSureRegs = 0;
1607 }
1608
1609 pCur->pNextInternal = NULL;
1610 pCur->pFirstInternal = NULL;
1611 pCur->fFlags = 0;
1612 MMR3HeapFree(pCur);
1613 }
1614}
1615
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette