VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFStack.cpp@ 73398

Last change on this file since 73398 was 73398, checked in by vboxsync, 7 years ago

DBGF: Unwinding PE/AMD64, considered sketches for generic unwinding using unwind info.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 51.9 KB
Line 
1/* $Id: DBGFStack.cpp 73398 2018-07-30 15:48:05Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Call Stack Analyser.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DBGF
23#include <VBox/vmm/dbgf.h>
24#include <VBox/vmm/selm.h>
25#include <VBox/vmm/mm.h>
26#include "DBGFInternal.h"
27#include <VBox/vmm/vm.h>
28#include <VBox/vmm/uvm.h>
29#include <VBox/err.h>
30#include <VBox/log.h>
31#include <iprt/param.h>
32#include <iprt/assert.h>
33#include <iprt/alloca.h>
34#include <iprt/mem.h>
35#include <iprt/string.h>
36#include <iprt/formats/pecoff.h>
37
38
39/*********************************************************************************************************************************
40* Structures and Typedefs *
41*********************************************************************************************************************************/
/**
 * Unwind context.
 *
 * Holds the register state being unwound plus a one-module cache of
 * PE/AMD64 unwind info.
 *
 * @note Using a constructor and destructor here for simple+safe cleanup.
 *
 * @todo Generalize and move to IPRT or some such place.
 */
typedef struct DBGFUNWINDCTX
{
    /** The user mode VM handle. */
    PUVM        m_pUVM;
    /** The ID of the virtual CPU whose stack is being walked. */
    VMCPUID     m_idCpu;
    /** Resolved and retained address space handle; released by the destructor. */
    RTDBGAS     m_hAs;

    /** General purpose register state, indexed by X86_GREG_xXX. */
    uint64_t    m_auRegs[16];
    /** The program counter (RIP). */
    uint64_t    m_uPc;
    /** The RFLAGS value. */
    uint64_t    m_uRFlags;
    /** The CS selector value. */
    uint16_t    m_uCs;
    /** The SS selector value. */
    uint16_t    m_uSs;

    /** Cached module covering m_uPc; NIL_RTDBGMOD if nothing is cached. */
    RTDBGMOD    m_hCached;
    /** Mapping address of the cached module (image or segment base). */
    RTUINTPTR   m_uCachedMapping;
    /** Size of the cached mapping (image or segment size). */
    RTUINTPTR   m_cbCachedMapping;
    /** Cached unwind info buffer (RTMemFree when flushing). */
    uint8_t    *m_pbCachedInfo;
    /** Size of the m_pbCachedInfo buffer. */
    size_t      m_cbCachedInfo;

    /** Function table for PE/AMD64 (entire m_pbCachedInfo) . */
    PCIMAGE_RUNTIME_FUNCTION_ENTRY m_paFunctions;
    /** Number of functions in m_paFunctions. */
    size_t      m_cFunctions;

    /**
     * Constructor.
     *
     * Captures the register state from @a pInitialCtx (zeros everything when
     * NULL) and resolves+retains the address space handle.
     */
    DBGFUNWINDCTX(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pInitialCtx, RTDBGAS hAs)
    {
        if (pInitialCtx)
        {
            m_auRegs[X86_GREG_xAX] = pInitialCtx->rax;
            m_auRegs[X86_GREG_xCX] = pInitialCtx->rcx;
            m_auRegs[X86_GREG_xDX] = pInitialCtx->rdx;
            m_auRegs[X86_GREG_xBX] = pInitialCtx->rbx;
            m_auRegs[X86_GREG_xSP] = pInitialCtx->rsp;
            m_auRegs[X86_GREG_xBP] = pInitialCtx->rbp;
            m_auRegs[X86_GREG_xSI] = pInitialCtx->rsi;
            m_auRegs[X86_GREG_xDI] = pInitialCtx->rdi;
            m_auRegs[X86_GREG_x8 ] = pInitialCtx->r8;
            m_auRegs[X86_GREG_x9 ] = pInitialCtx->r9;
            m_auRegs[X86_GREG_x10] = pInitialCtx->r10;
            m_auRegs[X86_GREG_x11] = pInitialCtx->r11;
            m_auRegs[X86_GREG_x12] = pInitialCtx->r12;
            m_auRegs[X86_GREG_x13] = pInitialCtx->r13;
            m_auRegs[X86_GREG_x14] = pInitialCtx->r14;
            m_auRegs[X86_GREG_x15] = pInitialCtx->r15;
            m_uPc     = pInitialCtx->rip;
            m_uCs     = pInitialCtx->cs.Sel;
            m_uSs     = pInitialCtx->ss.Sel;
            m_uRFlags = pInitialCtx->rflags.u;
        }
        else
        {
            RT_BZERO(m_auRegs, sizeof(m_auRegs));
            m_uPc     = 0;
            m_uCs     = 0;
            m_uSs     = 0;
            m_uRFlags = 0;
        }

        m_pUVM  = pUVM;
        m_idCpu = idCpu;
        m_hAs   = DBGFR3AsResolveAndRetain(pUVM, hAs);

        /* Start with an empty unwind info cache. */
        m_hCached         = NIL_RTDBGMOD;
        m_uCachedMapping  = 0;
        m_cbCachedMapping = 0;
        m_pbCachedInfo    = NULL;
        m_cbCachedInfo    = 0;
        m_paFunctions     = NULL;
        m_cFunctions      = 0;
    }

    ~DBGFUNWINDCTX();

} DBGFUNWINDCTX;
/** Pointer to unwind context. */
typedef DBGFUNWINDCTX *PDBGFUNWINDCTX;
124
125
126static void dbgfR3UnwindCtxFlushCache(PDBGFUNWINDCTX pUnwindCtx)
127{
128 if (pUnwindCtx->m_hCached != NIL_RTDBGMOD)
129 {
130 RTDbgModRelease(pUnwindCtx->m_hCached);
131 pUnwindCtx->m_hCached = NIL_RTDBGMOD;
132 }
133 if (pUnwindCtx->m_pbCachedInfo)
134 {
135 RTMemFree(pUnwindCtx->m_pbCachedInfo);
136 pUnwindCtx->m_pbCachedInfo = NULL;
137 }
138 pUnwindCtx->m_cbCachedInfo = 0;
139 pUnwindCtx->m_paFunctions = NULL;
140 pUnwindCtx->m_cFunctions = 0;
141}
142
143
144DBGFUNWINDCTX::~DBGFUNWINDCTX()
145{
146 dbgfR3UnwindCtxFlushCache(this);
147 if (m_hAs != NIL_RTDBGAS)
148 {
149 RTDbgAsRelease(m_hAs);
150 m_hAs = NIL_RTDBGAS;
151 }
152}
153
154
155/**
156 * Sets PC and SP.
157 *
158 * @returns true.
159 * @param pUnwindCtx The unwind context.
160 * @param pAddrPC The program counter (PC) value to set.
161 * @param pAddrStack The stack pointer (SP) value to set.
162 */
163static bool dbgfR3UnwindCtxSetPcAndSp(PDBGFUNWINDCTX pUnwindCtx, PCDBGFADDRESS pAddrPC, PCDBGFADDRESS pAddrStack)
164{
165 if (!DBGFADDRESS_IS_FAR(pAddrPC))
166 pUnwindCtx->m_uPc = pAddrPC->FlatPtr;
167 else
168 {
169 pUnwindCtx->m_uPc = pAddrPC->off;
170 pUnwindCtx->m_uCs = pAddrPC->Sel;
171 }
172 if (!DBGFADDRESS_IS_FAR(pAddrStack))
173 pUnwindCtx->m_auRegs[X86_GREG_xSP] = pAddrStack->FlatPtr;
174 else
175 {
176 pUnwindCtx->m_auRegs[X86_GREG_xSP] = pAddrStack->off;
177 pUnwindCtx->m_uSs = pAddrStack->Sel;
178 }
179 return true;
180}
181
182
183/**
184 * Try read a 16-bit value off the stack.
185 *
186 * @param pUnwindCtx The unwind context.
187 * @param uSrcAddr The stack address.
188 * @param puDst The read destination.
189 */
190static void dbgfR3UnwindCtxLoadU16(PDBGFUNWINDCTX pUnwindCtx, uint64_t uSrcAddr, uint16_t *puDst)
191{
192 DBGFADDRESS SrcAddr;
193 DBGFR3MemRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu,
194 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSrcAddr),
195 puDst, sizeof(*puDst));
196}
197
198
199/**
200 * Try read a 64-bit value off the stack.
201 *
202 * @param pUnwindCtx The unwind context.
203 * @param uSrcAddr The stack address.
204 * @param puDst The read destination.
205 */
206static void dbgfR3UnwindCtxLoadU64(PDBGFUNWINDCTX pUnwindCtx, uint64_t uSrcAddr, uint64_t *puDst)
207{
208 DBGFADDRESS SrcAddr;
209 DBGFR3MemRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu,
210 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSrcAddr),
211 puDst, sizeof(*puDst));
212}
213
214
215/**
216 * Binary searches the lookup table.
217 *
218 * @returns RVA of unwind info on success, UINT32_MAX on failure.
219 * @param paFunctions The table to lookup @a offFunctionRva in.
220 * @param iEnd Size of the table.
221 * @param uRva The RVA of the function we want.
222 */
223DECLINLINE(PCIMAGE_RUNTIME_FUNCTION_ENTRY)
224dbgfR3UnwindCtxLookupUnwindInfoRva(PCIMAGE_RUNTIME_FUNCTION_ENTRY paFunctions, size_t iEnd, uint32_t uRva)
225{
226 size_t iBegin = 0;
227 while (iBegin < iEnd)
228 {
229 size_t const i = iBegin + (iEnd - iBegin) / 2;
230 PCIMAGE_RUNTIME_FUNCTION_ENTRY pEntry = &paFunctions[i];
231 if (uRva < pEntry->BeginAddress)
232 iEnd = i;
233 else if (uRva > pEntry->EndAddress)
234 iBegin = i + 1;
235 else
236 return pEntry;
237 }
238 return NULL;
239}
240
241
/**
 * Processes an IRET frame.
 *
 * Pops RIP, CS, RFLAGS, RSP and SS off the stack into the unwind context,
 * optionally skipping an error code pushed on top of the frame first.
 *
 * @returns true.
 * @param   pUnwindCtx      The unwind context.
 * @param   fErrCd          Non-zero if there is an error code on the stack.
 * @param   pAddrFrame      Where to return the frame pointer.
 * @param   penmRetType     Where to return the return type.
 */
static bool dbgfR3UnwindCtxDoOneIRet(PDBGFUNWINDCTX pUnwindCtx, uint8_t fErrCd,
                                     PDBGFADDRESS pAddrFrame, DBGFRETURNTYPE *penmRetType)
{
    Assert(fErrCd <= 1);
    if (fErrCd)
        pUnwindCtx->m_auRegs[X86_GREG_xSP] += 8; /* error code */

    *penmRetType = DBGFRETURNTYPE_IRET64;
    DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, pAddrFrame,
                       pUnwindCtx->m_auRegs[X86_GREG_xSP] - /* pretend rbp is pushed on the stack */ 8);

    dbgfR3UnwindCtxLoadU64(pUnwindCtx, pUnwindCtx->m_auRegs[X86_GREG_xSP], &pUnwindCtx->m_uPc);
    pUnwindCtx->m_auRegs[X86_GREG_xSP] += 8; /* RIP */

    dbgfR3UnwindCtxLoadU16(pUnwindCtx, pUnwindCtx->m_auRegs[X86_GREG_xSP], &pUnwindCtx->m_uCs);
    pUnwindCtx->m_auRegs[X86_GREG_xSP] += 8; /* CS */

    dbgfR3UnwindCtxLoadU64(pUnwindCtx, pUnwindCtx->m_auRegs[X86_GREG_xSP], &pUnwindCtx->m_uRFlags);
    pUnwindCtx->m_auRegs[X86_GREG_xSP] += 8; /* EFLAGS */

    /* The initializer is a 16-byte-aligned fallback used if the read below
       fails (the load helper leaves the destination untouched on failure). */
    uint64_t uNewRsp = (pUnwindCtx->m_auRegs[X86_GREG_xSP] - 8) & ~(uint64_t)15;
    dbgfR3UnwindCtxLoadU64(pUnwindCtx, pUnwindCtx->m_auRegs[X86_GREG_xSP], &uNewRsp);
    pUnwindCtx->m_auRegs[X86_GREG_xSP] += 8; /* RSP */

    dbgfR3UnwindCtxLoadU16(pUnwindCtx, pUnwindCtx->m_auRegs[X86_GREG_xSP], &pUnwindCtx->m_uSs);
    pUnwindCtx->m_auRegs[X86_GREG_xSP] += 8; /* SS */

    /* Only switch to the interrupted RSP after SS has been read off the frame. */
    pUnwindCtx->m_auRegs[X86_GREG_xSP] = uNewRsp;
    return true;
}
281
282
/**
 * Unwinds one frame using cached module info.
 *
 * Interprets the Windows x64 UNWIND_INFO/UNWIND_CODE opcodes for the function
 * covering @a uRvaRip, updating the register state in @a pUnwindCtx, then
 * performs the final near return (or IRET for machine frames).
 *
 * @returns true on success, false on failure.
 * @param   pUnwindCtx      The unwind context.
 * @param   uRvaRip         The RVA of the RIP.
 * @param   pAddrFrame      Where to return the frame pointer.
 * @param   penmRetType     Where to return the return type.
 */
static bool dbgfR3UnwindCtxDoOneFrameCached(PDBGFUNWINDCTX pUnwindCtx, uint32_t uRvaRip,
                                            PDBGFADDRESS pAddrFrame, DBGFRETURNTYPE *penmRetType)
{
    /*
     * Lookup the unwind info RVA and try read it.
     */
    PCIMAGE_RUNTIME_FUNCTION_ENTRY pEntry = dbgfR3UnwindCtxLookupUnwindInfoRva(pUnwindCtx->m_paFunctions,
                                                                               pUnwindCtx->m_cFunctions, uRvaRip);
    if (pEntry)
    {
        IMAGE_RUNTIME_FUNCTION_ENTRY ChainedEntry;
        /* NOTE(review): iFrameReg/offFrameReg are recorded (FrameRegister and
           UWOP_SET_FPREG below) but not consumed anywhere in this revision. */
        unsigned iFrameReg   = ~0U;
        unsigned offFrameReg = 0;

        int      fInEpilog = -1; /* -1: not-determined-assume-false; 0: false; 1: true. */
        uint8_t  cbEpilog  = 0;
        uint8_t  offEpilog = UINT8_MAX;
        for (unsigned cChainLoops = 0; ; cChainLoops++)
        {
            /*
             * Get the info.
             */
            union
            {
                uint32_t uRva;
                uint8_t  ab[  RT_OFFSETOF(IMAGE_UNWIND_INFO, aOpcodes)
                            + sizeof(IMAGE_UNWIND_CODE) * 256
                            + sizeof(IMAGE_RUNTIME_FUNCTION_ENTRY)];
            } uBuf;

            uBuf.uRva = pEntry->UnwindInfoAddress;
            size_t cbBuf = sizeof(uBuf);
            int rc = RTDbgModImageQueryProp(pUnwindCtx->m_hCached, RTLDRPROP_UNWIND_INFO, &uBuf, cbBuf, &cbBuf);
            if (RT_FAILURE(rc))
                return false;

            /*
             * Check the info.
             */
            ASMCompilerBarrier(); /* we're aliasing */
            PCIMAGE_UNWIND_INFO pInfo = (PCIMAGE_UNWIND_INFO)&uBuf;

            if (pInfo->Version != 1 && pInfo->Version != 2)
                return false;

            /*
             * Execute the opcodes.
             */
            unsigned const cOpcodes = pInfo->CountOfCodes;
            unsigned       iOpcode  = 0;

            /*
             * Check for epilog opcodes at the start and see if we're in an epilog.
             */
            if (   pInfo->Version >= 2
                && iOpcode < cOpcodes
                && pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_EPILOG)
            {
                if (fInEpilog == -1)
                {
                    cbEpilog = pInfo->aOpcodes[iOpcode].u.CodeOffset;
                    Assert(cbEpilog > 0);

                    uint32_t uRvaEpilog = pEntry->EndAddress - cbEpilog;
                    iOpcode++;
                    /* OpInfo bit 0 of the first epilog code flags it as describing
                       the epilog at the very end of the function. */
                    if (   (pInfo->aOpcodes[iOpcode - 1].u.OpInfo & 1)
                        && uRvaRip >= uRvaEpilog)
                    {
                        offEpilog = uRvaRip - uRvaEpilog;
                        fInEpilog = 1;
                    }
                    else
                    {
                        /* Scan the remaining epilog codes for other epilog starts. */
                        fInEpilog = 0;
                        while (iOpcode < cOpcodes && pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_EPILOG)
                        {
                            uRvaEpilog = pEntry->EndAddress
                                       - (pInfo->aOpcodes[iOpcode].u.CodeOffset + (pInfo->aOpcodes[iOpcode].u.OpInfo << 8));
                            iOpcode++;
                            if (uRvaRip - uRvaEpilog < cbEpilog)
                            {
                                offEpilog = uRvaRip - uRvaEpilog;
                                fInEpilog = 1;
                                break;
                            }
                        }
                    }
                }
                /* Skip any remaining epilog opcodes. */
                while (iOpcode < cOpcodes && pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_EPILOG)
                    iOpcode++;
            }
            if (fInEpilog != 1)
            {
                /*
                 * Skip opcodes that doesn't apply to us if we're in the prolog.
                 */
                uint32_t offPc = uRvaRip - pEntry->BeginAddress;
                if (offPc < pInfo->SizeOfProlog)
                    while (iOpcode < cOpcodes && pInfo->aOpcodes[iOpcode].u.CodeOffset > offPc)
                        iOpcode++;

                /*
                 * Execute the opcodes.
                 */
                if (pInfo->FrameRegister != 0)
                {
                    iFrameReg   = pInfo->FrameRegister;
                    offFrameReg = pInfo->FrameOffset * 16;
                }
                while (iOpcode < cOpcodes)
                {
                    Assert(pInfo->aOpcodes[iOpcode].u.CodeOffset <= offPc);
                    switch (pInfo->aOpcodes[iOpcode].u.UnwindOp)
                    {
                        case IMAGE_AMD64_UWOP_PUSH_NONVOL:
                            pUnwindCtx->m_auRegs[X86_GREG_xSP] += 8;
                            dbgfR3UnwindCtxLoadU64(pUnwindCtx, pUnwindCtx->m_auRegs[X86_GREG_xSP],
                                                   &pUnwindCtx->m_auRegs[pInfo->aOpcodes[iOpcode].u.OpInfo]);
                            iOpcode++;
                            break;

                        case IMAGE_AMD64_UWOP_ALLOC_LARGE:
                            if (pInfo->aOpcodes[iOpcode].u.OpInfo == 0)
                            {
                                /* OpInfo == 0: 16-bit slot count scaled by 8. */
                                iOpcode += 2;
                                AssertBreak(iOpcode <= cOpcodes);
                                pUnwindCtx->m_auRegs[X86_GREG_xSP] += pInfo->aOpcodes[iOpcode - 1].FrameOffset * 8;
                            }
                            else
                            {
                                /* OpInfo != 0: unscaled 32-bit byte count in two slots. */
                                iOpcode += 3;
                                AssertBreak(iOpcode <= cOpcodes);
                                pUnwindCtx->m_auRegs[X86_GREG_xSP] += RT_MAKE_U32(pInfo->aOpcodes[iOpcode - 2].FrameOffset,
                                                                                  pInfo->aOpcodes[iOpcode - 1].FrameOffset);
                            }
                            break;

                        case IMAGE_AMD64_UWOP_ALLOC_SMALL:
                            AssertBreak(iOpcode <= cOpcodes);
                            pUnwindCtx->m_auRegs[X86_GREG_xSP] += pInfo->aOpcodes[iOpcode].u.OpInfo * 8 + 8;
                            iOpcode++;
                            break;

                        case IMAGE_AMD64_UWOP_SET_FPREG:
                            iFrameReg   = pInfo->aOpcodes[iOpcode].u.OpInfo;
                            offFrameReg = pInfo->FrameOffset * 16;
                            iOpcode++;
                            break;

                        case IMAGE_AMD64_UWOP_SAVE_NONVOL:
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL_FAR:
                        {
                            bool const     fFar  = pInfo->aOpcodes[iOpcode].u.UnwindOp == IMAGE_AMD64_UWOP_SAVE_NONVOL_FAR;
                            unsigned const iGreg = pInfo->aOpcodes[iOpcode].u.OpInfo;
                            uint32_t       off   = 0;
                            iOpcode++;
                            if (iOpcode < cOpcodes)
                            {
                                off = pInfo->aOpcodes[iOpcode].FrameOffset;
                                iOpcode++;
                                if (fFar && iOpcode < cOpcodes)
                                {
                                    off |= (uint32_t)pInfo->aOpcodes[iOpcode].FrameOffset << 16;
                                    iOpcode++;
                                }
                            }
                            off *= 8;
                            dbgfR3UnwindCtxLoadU64(pUnwindCtx, pUnwindCtx->m_auRegs[X86_GREG_xSP] + off, &pUnwindCtx->m_auRegs[iGreg]);
                            break;
                        }

                        case IMAGE_AMD64_UWOP_SAVE_XMM128:
                            iOpcode += 2; /* XMM state not tracked; just skip. */
                            break;

                        case IMAGE_AMD64_UWOP_SAVE_XMM128_FAR:
                            iOpcode += 3; /* XMM state not tracked; just skip. */
                            break;

                        case IMAGE_AMD64_UWOP_PUSH_MACHFRAME:
                            return dbgfR3UnwindCtxDoOneIRet(pUnwindCtx, pInfo->aOpcodes[iOpcode].u.OpInfo, pAddrFrame, penmRetType);

                        case IMAGE_AMD64_UWOP_EPILOG:
                            iOpcode += 1;
                            break;

                        case IMAGE_AMD64_UWOP_RESERVED_7:
                            AssertFailedReturn(false);

                        default:
                            AssertMsgFailedReturn(("%u\n", pInfo->aOpcodes[iOpcode].u.UnwindOp), false);
                    }
                }
            }
            else
            {
                /*
                 * We're in the POP sequence of an epilog.  The POP sequence should
                 * mirror the PUSH sequence exactly.
                 *
                 * Note! We should only end up here for the initial frame (just consider
                 *       RSP, stack allocations, non-volatile register restores, ++).
                 */
                while (iOpcode < cOpcodes)
                {
                    switch (pInfo->aOpcodes[iOpcode].u.UnwindOp)
                    {
                        case IMAGE_AMD64_UWOP_PUSH_NONVOL:
                            pUnwindCtx->m_auRegs[X86_GREG_xSP] += 8;
                            if (offEpilog == 0)
                                dbgfR3UnwindCtxLoadU64(pUnwindCtx, pUnwindCtx->m_auRegs[X86_GREG_xSP],
                                                       &pUnwindCtx->m_auRegs[pInfo->aOpcodes[iOpcode].u.OpInfo]);
                            else
                            {
                                /* Decrement offEpilog by estimated POP instruction length. */
                                offEpilog -= 1;
                                /* REX-prefixed POP (r8-r15) is one byte longer. */
                                if (offEpilog > 0 && pInfo->aOpcodes[iOpcode].u.OpInfo >= 8)
                                    offEpilog -= 1;
                            }
                            iOpcode++;
                            break;

                        case IMAGE_AMD64_UWOP_PUSH_MACHFRAME: /* Must terminate an epilog, so always execute this. */
                            return dbgfR3UnwindCtxDoOneIRet(pUnwindCtx, pInfo->aOpcodes[iOpcode].u.OpInfo, pAddrFrame, penmRetType);

                        case IMAGE_AMD64_UWOP_ALLOC_SMALL:
                        case IMAGE_AMD64_UWOP_SET_FPREG:
                        case IMAGE_AMD64_UWOP_EPILOG:
                            iOpcode++;
                            break;
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL:
                        case IMAGE_AMD64_UWOP_SAVE_XMM128:
                            iOpcode += 2;
                            break;
                        case IMAGE_AMD64_UWOP_ALLOC_LARGE:
                        case IMAGE_AMD64_UWOP_SAVE_NONVOL_FAR:
                        case IMAGE_AMD64_UWOP_SAVE_XMM128_FAR:
                            iOpcode += 3;
                            break;

                        default:
                            AssertMsgFailedReturn(("%u\n", pInfo->aOpcodes[iOpcode].u.UnwindOp), false);
                    }
                }
            }

            /*
             * Chained stuff?
             */
            if (!(pInfo->Flags & IMAGE_UNW_FLAGS_CHAININFO))
                break;
            /* The chained entry follows the opcode array, rounded up to an even count. */
            ChainedEntry = *(PCIMAGE_RUNTIME_FUNCTION_ENTRY)&pInfo->aOpcodes[(cOpcodes + 1) & ~1];
            pEntry = &ChainedEntry;
            AssertReturn(cChainLoops < 32, false);
        }

        /*
         * RSP should now give us the return address, so perform a RET.
         */
        *penmRetType = DBGFRETURNTYPE_NEAR64;
        DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, pAddrFrame,
                           pUnwindCtx->m_auRegs[X86_GREG_xSP] - /* pretend rbp is pushed on the stack */ 8);

        dbgfR3UnwindCtxLoadU64(pUnwindCtx, pUnwindCtx->m_auRegs[X86_GREG_xSP], &pUnwindCtx->m_uPc);
        pUnwindCtx->m_auRegs[X86_GREG_xSP] += 8;
        return true;
    }

    RT_NOREF_PV(pAddrFrame);
    return false;
}
563
564
/**
 * Tries to unwind one frame using unwind info.
 *
 * Resolves the module covering the current PC, caches its PE/AMD64 unwind
 * function table, and defers to dbgfR3UnwindCtxDoOneFrameCached.
 *
 * @returns true on success, false on failure.
 * @param   pUnwindCtx      The unwind context.
 * @param   pAddrFrame      Where to return the frame pointer.
 * @param   penmRetType     Where to return the return type.
 */
static bool dbgfR3UnwindCtxDoOneFrame(PDBGFUNWINDCTX pUnwindCtx, PDBGFADDRESS pAddrFrame, DBGFRETURNTYPE *penmRetType)
{
    /*
     * Hope for the same module as last time around.
     */
    RTUINTPTR offCache = pUnwindCtx->m_uPc - pUnwindCtx->m_uCachedMapping;
    if (offCache < pUnwindCtx->m_cbCachedMapping)
        return dbgfR3UnwindCtxDoOneFrameCached(pUnwindCtx, offCache, pAddrFrame, penmRetType);

    /*
     * Try locate the module.
     */
    RTDBGMOD    hDbgMod = NIL_RTDBGMOD;
    RTUINTPTR   uBase   = 0;
    RTDBGSEGIDX idxSeg  = NIL_RTDBGSEGIDX;
    int rc = RTDbgAsModuleByAddr(pUnwindCtx->m_hAs, pUnwindCtx->m_uPc, &hDbgMod, &uBase, &idxSeg);
    if (RT_SUCCESS(rc))
    {
        /* We cache the module regardless of unwind info.  The reference is
           owned by the cache from here on (released by the flush call). */
        dbgfR3UnwindCtxFlushCache(pUnwindCtx);
        pUnwindCtx->m_hCached         = hDbgMod;
        pUnwindCtx->m_uCachedMapping  = uBase;
        pUnwindCtx->m_cbCachedMapping = idxSeg == NIL_RTDBGSEGIDX ? RTDbgModImageSize(hDbgMod)
                                      : RTDbgModSegmentSize(hDbgMod, idxSeg);

        /* Play simple for now. */
        if (   idxSeg == NIL_RTDBGSEGIDX
            && RTDbgModImageGetFormat(hDbgMod) == RTLDRFMT_PE
            && RTDbgModImageGetArch(hDbgMod)   == RTLDRARCH_AMD64)
        {
            /*
             * Try query the unwind data.
             */
            /* First call sizes the table (expects VERR_BUFFER_OVERFLOW). */
            uint32_t uDummy;
            size_t cbNeeded = 0;
            rc = RTDbgModImageQueryProp(hDbgMod, RTLDRPROP_UNWIND_TABLE, &uDummy, 0, &cbNeeded);
            if (   rc == VERR_BUFFER_OVERFLOW
                && cbNeeded >= sizeof(*pUnwindCtx->m_paFunctions)
                && cbNeeded < _64M)
            {
                void *pvBuf = RTMemAllocZ(cbNeeded + 32);
                if (pvBuf)
                {
                    rc = RTDbgModImageQueryProp(hDbgMod, RTLDRPROP_UNWIND_TABLE, pvBuf, cbNeeded + 32, &cbNeeded);
                    if (RT_SUCCESS(rc))
                    {
                        /* Buffer ownership passes to the cache on success. */
                        pUnwindCtx->m_pbCachedInfo = (uint8_t *)pvBuf;
                        pUnwindCtx->m_cbCachedInfo = cbNeeded;
                        pUnwindCtx->m_paFunctions  = (PCIMAGE_RUNTIME_FUNCTION_ENTRY)pvBuf;
                        pUnwindCtx->m_cFunctions   = cbNeeded / sizeof(*pUnwindCtx->m_paFunctions);

                        return dbgfR3UnwindCtxDoOneFrameCached(pUnwindCtx, pUnwindCtx->m_uPc - pUnwindCtx->m_uCachedMapping,
                                                               pAddrFrame, penmRetType);
                    }
                    RTMemFree(pvBuf);
                }
            }
        }
    }
    return false;
}
634
635
636/**
637 * Read stack memory, will init entire buffer.
638 */
639DECLINLINE(int) dbgfR3StackRead(PUVM pUVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pSrcAddr, size_t cb, size_t *pcbRead)
640{
641 int rc = DBGFR3MemRead(pUVM, idCpu, pSrcAddr, pvBuf, cb);
642 if (RT_FAILURE(rc))
643 {
644 /* fallback: byte by byte and zero the ones we fail to read. */
645 size_t cbRead;
646 for (cbRead = 0; cbRead < cb; cbRead++)
647 {
648 DBGFADDRESS Addr = *pSrcAddr;
649 rc = DBGFR3MemRead(pUVM, idCpu, DBGFR3AddrAdd(&Addr, cbRead), (uint8_t *)pvBuf + cbRead, 1);
650 if (RT_FAILURE(rc))
651 break;
652 }
653 if (cbRead)
654 rc = VINF_SUCCESS;
655 memset((char *)pvBuf + cbRead, 0, cb - cbRead);
656 *pcbRead = cbRead;
657 }
658 else
659 *pcbRead = cb;
660 return rc;
661}
662
663
664/**
665 * Internal worker routine.
666 *
667 * On x86 the typical stack frame layout is like this:
668 * .. ..
669 * 16 parameter 2
670 * 12 parameter 1
671 * 8 parameter 0
672 * 4 return address
673 * 0 old ebp; current ebp points here
674 */
675DECL_NO_INLINE(static, int) dbgfR3StackWalk(PDBGFUNWINDCTX pUnwindCtx, PDBGFSTACKFRAME pFrame, bool fFirst)
676{
677 /*
678 * Stop if we got a read error in the previous run.
679 */
680 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST)
681 return VERR_NO_MORE_FILES;
682
683 /*
684 * Advance the frame (except for the first).
685 */
686 if (!fFirst) /** @todo we can probably eliminate this fFirst business... */
687 {
688 /* frame, pc and stack is taken from the existing frames return members. */
689 pFrame->AddrFrame = pFrame->AddrReturnFrame;
690 pFrame->AddrPC = pFrame->AddrReturnPC;
691 pFrame->pSymPC = pFrame->pSymReturnPC;
692 pFrame->pLinePC = pFrame->pLineReturnPC;
693
694 /* increment the frame number. */
695 pFrame->iFrame++;
696
697 /* UNWIND_INFO_RET -> USED_UNWIND; return type */
698 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET))
699 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
700 else
701 {
702 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
703 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
704 if (pFrame->enmReturnFrameReturnType != DBGFRETURNTYPE_INVALID)
705 {
706 pFrame->enmReturnType = pFrame->enmReturnFrameReturnType;
707 pFrame->enmReturnFrameReturnType = DBGFRETURNTYPE_INVALID;
708 }
709 }
710 }
711
712 /*
713 * Figure the return address size and use the old PC to guess stack item size.
714 */
715 /** @todo this is bogus... */
716 unsigned cbRetAddr = DBGFReturnTypeSize(pFrame->enmReturnType);
717 unsigned cbStackItem;
718 switch (pFrame->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
719 {
720 case DBGFADDRESS_FLAGS_FAR16: cbStackItem = 2; break;
721 case DBGFADDRESS_FLAGS_FAR32: cbStackItem = 4; break;
722 case DBGFADDRESS_FLAGS_FAR64: cbStackItem = 8; break;
723 case DBGFADDRESS_FLAGS_RING0: cbStackItem = sizeof(RTHCUINTPTR); break;
724 default:
725 switch (pFrame->enmReturnType)
726 {
727 case DBGFRETURNTYPE_FAR16:
728 case DBGFRETURNTYPE_IRET16:
729 case DBGFRETURNTYPE_IRET32_V86:
730 case DBGFRETURNTYPE_NEAR16: cbStackItem = 2; break;
731
732 case DBGFRETURNTYPE_FAR32:
733 case DBGFRETURNTYPE_IRET32:
734 case DBGFRETURNTYPE_IRET32_PRIV:
735 case DBGFRETURNTYPE_NEAR32: cbStackItem = 4; break;
736
737 case DBGFRETURNTYPE_FAR64:
738 case DBGFRETURNTYPE_IRET64:
739 case DBGFRETURNTYPE_NEAR64: cbStackItem = 8; break;
740
741 default:
742 AssertMsgFailed(("%d\n", pFrame->enmReturnType));
743 cbStackItem = 4;
744 break;
745 }
746 }
747
748 /*
749 * Read the raw frame data.
750 * We double cbRetAddr in case we have a far return.
751 */
752 union
753 {
754 uint64_t *pu64;
755 uint32_t *pu32;
756 uint16_t *pu16;
757 uint8_t *pb;
758 void *pv;
759 } u, uRet, uArgs, uBp;
760 size_t cbRead = cbRetAddr*2 + cbStackItem + sizeof(pFrame->Args);
761 u.pv = alloca(cbRead);
762 uBp = u;
763 uRet.pb = u.pb + cbStackItem;
764 uArgs.pb = u.pb + cbStackItem + cbRetAddr;
765
766 Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
767 int rc = dbgfR3StackRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, u.pv, &pFrame->AddrFrame, cbRead, &cbRead);
768 if ( RT_FAILURE(rc)
769 || cbRead < cbRetAddr + cbStackItem)
770 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_LAST;
771
772 /*
773 * Return Frame address.
774 *
775 * If we used unwind info to get here, the unwind register context will be
776 * positioned after the return instruction has been executed. We start by
777 * picking up the rBP register here for return frame and will try improve
778 * on it further down by using unwind info.
779 */
780 pFrame->AddrReturnFrame = pFrame->AddrFrame;
781 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
782 {
783 if ( pFrame->enmReturnType == DBGFRETURNTYPE_IRET32_PRIV
784 || pFrame->enmReturnType == DBGFRETURNTYPE_IRET64)
785 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnFrame,
786 pUnwindCtx->m_uSs, pUnwindCtx->m_auRegs[X86_GREG_xBP]);
787 else if (pFrame->enmReturnType == DBGFRETURNTYPE_IRET32_V86)
788 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnFrame,
789 ((uint32_t)pUnwindCtx->m_uSs << 4) + pUnwindCtx->m_auRegs[X86_GREG_xBP]);
790 else
791 {
792 pFrame->AddrReturnFrame.off = pUnwindCtx->m_auRegs[X86_GREG_xBP];
793 pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
794 }
795 }
796 else
797 {
798 switch (cbStackItem)
799 {
800 case 2: pFrame->AddrReturnFrame.off = *uBp.pu16; break;
801 case 4: pFrame->AddrReturnFrame.off = *uBp.pu32; break;
802 case 8: pFrame->AddrReturnFrame.off = *uBp.pu64; break;
803 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
804 }
805
806 /* Watcom tries to keep the frame pointer odd for far returns. */
807 if ( cbStackItem <= 4
808 && !(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
809 {
810 if (pFrame->AddrReturnFrame.off & 1)
811 {
812 pFrame->AddrReturnFrame.off &= ~(RTGCUINTPTR)1;
813 if (pFrame->enmReturnType == DBGFRETURNTYPE_NEAR16)
814 {
815 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
816 pFrame->enmReturnType = DBGFRETURNTYPE_FAR16;
817 cbRetAddr = 4;
818 }
819 else if (pFrame->enmReturnType == DBGFRETURNTYPE_NEAR32)
820 {
821 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
822 pFrame->enmReturnType = DBGFRETURNTYPE_FAR32;
823 cbRetAddr = 8;
824 }
825 }
826 else if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN)
827 {
828 if (pFrame->enmReturnType == DBGFRETURNTYPE_FAR16)
829 {
830 pFrame->enmReturnType = DBGFRETURNTYPE_NEAR16;
831 cbRetAddr = 2;
832 }
833 else if (pFrame->enmReturnType == DBGFRETURNTYPE_NEAR32)
834 {
835 pFrame->enmReturnType = DBGFRETURNTYPE_FAR32;
836 cbRetAddr = 4;
837 }
838 pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
839 }
840 uArgs.pb = u.pb + cbStackItem + cbRetAddr;
841 }
842
843 pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
844 }
845
846 /*
847 * Return Stack Address.
848 */
849 pFrame->AddrReturnStack = pFrame->AddrReturnFrame;
850 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
851 {
852 if ( pFrame->enmReturnType == DBGFRETURNTYPE_IRET32_PRIV
853 && pFrame->enmReturnType == DBGFRETURNTYPE_IRET64)
854 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnStack,
855 pUnwindCtx->m_uSs, pUnwindCtx->m_auRegs[X86_GREG_xSP]);
856 else if (pFrame->enmReturnType == DBGFRETURNTYPE_IRET32_V86)
857 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnStack,
858 ((uint32_t)pUnwindCtx->m_uSs << 4) + pUnwindCtx->m_auRegs[X86_GREG_xSP]);
859 else
860 {
861 pFrame->AddrReturnStack.off = pUnwindCtx->m_auRegs[X86_GREG_xSP];
862 pFrame->AddrReturnStack.FlatPtr += pFrame->AddrReturnStack.off - pFrame->AddrStack.off;
863 }
864 }
865 else
866 {
867 pFrame->AddrReturnStack.off += cbStackItem + cbRetAddr;
868 pFrame->AddrReturnStack.FlatPtr += cbStackItem + cbRetAddr;
869 }
870
871 /*
872 * Return PC.
873 */
874 pFrame->AddrReturnPC = pFrame->AddrPC;
875 if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
876 {
877 if (DBGFReturnTypeIsNear(pFrame->enmReturnType))
878 {
879 pFrame->AddrReturnPC.off = pUnwindCtx->m_uPc;
880 pFrame->AddrReturnPC.FlatPtr += pFrame->AddrReturnPC.off - pFrame->AddrPC.off;
881 }
882 else
883 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC,
884 pUnwindCtx->m_uCs, pUnwindCtx->m_uPc);
885 }
886 else
887 switch (pFrame->enmReturnType)
888 {
889 case DBGFRETURNTYPE_NEAR16:
890 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
891 {
892 pFrame->AddrReturnPC.FlatPtr += *uRet.pu16 - pFrame->AddrReturnPC.off;
893 pFrame->AddrReturnPC.off = *uRet.pu16;
894 }
895 else
896 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu16);
897 break;
898 case DBGFRETURNTYPE_NEAR32:
899 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
900 {
901 pFrame->AddrReturnPC.FlatPtr += *uRet.pu32 - pFrame->AddrReturnPC.off;
902 pFrame->AddrReturnPC.off = *uRet.pu32;
903 }
904 else
905 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu32);
906 break;
907 case DBGFRETURNTYPE_NEAR64:
908 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
909 {
910 pFrame->AddrReturnPC.FlatPtr += *uRet.pu64 - pFrame->AddrReturnPC.off;
911 pFrame->AddrReturnPC.off = *uRet.pu64;
912 }
913 else
914 DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu64);
915 break;
916 case DBGFRETURNTYPE_FAR16:
917 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
918 break;
919 case DBGFRETURNTYPE_FAR32:
920 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
921 break;
922 case DBGFRETURNTYPE_FAR64:
923 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
924 break;
925 case DBGFRETURNTYPE_IRET16:
926 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
927 break;
928 case DBGFRETURNTYPE_IRET32:
929 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
930 break;
931 case DBGFRETURNTYPE_IRET32_PRIV:
932 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
933 break;
934 case DBGFRETURNTYPE_IRET32_V86:
935 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
936 break;
937 case DBGFRETURNTYPE_IRET64:
938 DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
939 break;
940 default:
941 AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
942 return VERR_INVALID_PARAMETER;
943 }
944
945
946 pFrame->pSymReturnPC = DBGFR3AsSymbolByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
947 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
948 NULL /*poffDisp*/, NULL /*phMod*/);
949 pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
950 NULL /*poffDisp*/, NULL /*phMod*/);
951
952 /*
953 * Frame bitness flag.
954 */
955 /** @todo use previous return type for this? */
956 pFrame->fFlags &= ~(DBGFSTACKFRAME_FLAGS_16BIT | DBGFSTACKFRAME_FLAGS_32BIT | DBGFSTACKFRAME_FLAGS_64BIT);
957 switch (cbStackItem)
958 {
959 case 2: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_16BIT; break;
960 case 4: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
961 case 8: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
962 default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
963 }
964
965 /*
966 * The arguments.
967 */
968 memcpy(&pFrame->Args, uArgs.pv, sizeof(pFrame->Args));
969
970 /*
971 * Try use unwind information to locate the return frame pointer (for the
972 * next loop iteration).
973 */
974 Assert(!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET));
975 pFrame->enmReturnFrameReturnType = DBGFRETURNTYPE_INVALID;
976 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST))
977 {
978 /* Set PC and SP if we didn't unwind our way here (context will then point
979 and the return PC and SP already). */
980 if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
981 {
982 dbgfR3UnwindCtxSetPcAndSp(pUnwindCtx, &pFrame->AddrReturnPC, &pFrame->AddrReturnStack);
983 }
984 /** @todo Reevaluate CS if the previous frame return type isn't near. */
985
986 DBGFADDRESS AddrReturnFrame = pFrame->AddrReturnFrame;
987 DBGFRETURNTYPE enmReturnType = pFrame->enmReturnType;
988 if (dbgfR3UnwindCtxDoOneFrame(pUnwindCtx, &AddrReturnFrame, &enmReturnType))
989 {
990 pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
991 pFrame->AddrReturnFrame = AddrReturnFrame;
992 pFrame->enmReturnFrameReturnType = enmReturnType;
993 }
994 }
995
996 return VINF_SUCCESS;
997}
998
999
/**
 * Walks the entire stack allocating memory as we walk.
 *
 * Worker for the DBGFR3StackWalkBegin* APIs; executed on the EMT of @a idCpu
 * via VMR3ReqPriorityCallWaitU (see dbgfR3StackWalkBeginCommon).
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The ID of the virtual CPU whose stack is walked.
 * @param   pCtx            The CPU context to start from.  NULL for ring-0
 *                          walks.  NOTE(review): pCtx is dereferenced below
 *                          whenever pAddrPC/pAddrStack/pAddrFrame are omitted,
 *                          so ring-0 callers presumably always supply explicit
 *                          addresses -- confirm against callers.
 * @param   hAs             The address space handle used for symbol/line lookup.
 * @param   enmCodeType     Guest, hyper or ring-0 code.
 * @param   pAddrFrame      Frame address to start at.  Optional.
 * @param   pAddrStack      Stack address to start at.  Optional.
 * @param   pAddrPC         Program counter to start at.  Optional.
 * @param   enmReturnType   The return address type, or DBGFRETURNTYPE_INVALID
 *                          to have it deduced from the mode/address type.
 * @param   ppFirstFrame    Where to return the head of the allocated frame
 *                          list.  Caller releases it with DBGFR3StackWalkEnd.
 */
static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pCtx, RTDBGAS hAs,
                                                DBGFCODETYPE enmCodeType,
                                                PCDBGFADDRESS pAddrFrame,
                                                PCDBGFADDRESS pAddrStack,
                                                PCDBGFADDRESS pAddrPC,
                                                DBGFRETURNTYPE enmReturnType,
                                                PCDBGFSTACKFRAME *ppFirstFrame)
{
    DBGFUNWINDCTX UnwindCtx(pUVM, idCpu, pCtx, hAs);

    /* alloc first frame (zeroed - the loop below relies on fFlags starting clear). */
    PDBGFSTACKFRAME pCur = (PDBGFSTACKFRAME)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pCur));
    if (!pCur)
        return VERR_NO_MEMORY;

    /*
     * Initialize the frame.
     */
    pCur->pNextInternal = NULL;
    pCur->pFirstInternal = pCur;

    /* Starting PC: explicit argument, flat rip (non-guest), or cs:rip (guest). */
    int rc = VINF_SUCCESS;
    if (pAddrPC)
        pCur->AddrPC = *pAddrPC;
    else if (enmCodeType != DBGFCODETYPE_GUEST)
        DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->rip);
    else
        rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrPC, pCtx->cs.Sel, pCtx->rip);
    if (RT_SUCCESS(rc))
    {
        /*
         * Determine the address width mask (applied to rsp/rbp below) and, if
         * the caller didn't specify one, a default near return type matching
         * the code type / PC address type / guest CPU mode.
         */
        uint64_t fAddrMask;
        if (enmCodeType == DBGFCODETYPE_RING0)
            fAddrMask = HC_ARCH_BITS == 64 ? UINT64_MAX : UINT32_MAX;
        else if (enmCodeType == DBGFCODETYPE_HYPER)
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR16(&pCur->AddrPC))
            fAddrMask = UINT16_MAX;
        else if (DBGFADDRESS_IS_FAR32(&pCur->AddrPC))
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR64(&pCur->AddrPC))
            fAddrMask = UINT64_MAX;
        else
        {
            /* PC address type didn't pin the width; fall back on the guest CPU mode. */
            PVMCPU pVCpu = VMMGetCpuById(pUVM->pVM, idCpu);
            CPUMMODE enmCpuMode = CPUMGetGuestMode(pVCpu);
            if (enmCpuMode == CPUMMODE_REAL)
            {
                fAddrMask = UINT16_MAX;
                if (enmReturnType == DBGFRETURNTYPE_INVALID)
                    pCur->enmReturnType = DBGFRETURNTYPE_NEAR16;
            }
            else if (   enmCpuMode == CPUMMODE_PROTECTED
                     || !CPUMIsGuestIn64BitCode(pVCpu))
            {
                fAddrMask = UINT32_MAX;
                if (enmReturnType == DBGFRETURNTYPE_INVALID)
                    pCur->enmReturnType = DBGFRETURNTYPE_NEAR32;
            }
            else
            {
                fAddrMask = UINT64_MAX;
                if (enmReturnType == DBGFRETURNTYPE_INVALID)
                    pCur->enmReturnType = DBGFRETURNTYPE_NEAR64;
            }
        }

        /* Default return type from the PC address flags (may overwrite the
           CPU-mode based default chosen above for the FARxx/RING0 cases). */
        if (enmReturnType == DBGFRETURNTYPE_INVALID)
            switch (pCur->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
            {
                case DBGFADDRESS_FLAGS_FAR16: pCur->enmReturnType = DBGFRETURNTYPE_NEAR16; break;
                case DBGFADDRESS_FLAGS_FAR32: pCur->enmReturnType = DBGFRETURNTYPE_NEAR32; break;
                case DBGFADDRESS_FLAGS_FAR64: pCur->enmReturnType = DBGFRETURNTYPE_NEAR64; break;
                case DBGFADDRESS_FLAGS_RING0:
                    pCur->enmReturnType = HC_ARCH_BITS == 64 ? DBGFRETURNTYPE_NEAR64 : DBGFRETURNTYPE_NEAR32;
                    break;
                default:
                    pCur->enmReturnType = DBGFRETURNTYPE_NEAR32;
                    break;
            }


        /* Starting stack pointer: explicit argument, flat rsp, or ss:rsp. */
        if (pAddrStack)
            pCur->AddrStack = *pAddrStack;
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->rsp & fAddrMask);
        else
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrStack, pCtx->ss.Sel, pCtx->rsp & fAddrMask);

        /* Starting frame pointer: explicit argument, else try unwind info
           (preferred), else fall back on rbp. */
        Assert(!(pCur->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO));
        if (pAddrFrame)
            pCur->AddrFrame = *pAddrFrame;
        else if (   RT_SUCCESS(rc)
                 && dbgfR3UnwindCtxSetPcAndSp(&UnwindCtx, &pCur->AddrPC, &pCur->AddrStack)
                 && dbgfR3UnwindCtxDoOneFrame(&UnwindCtx, &pCur->AddrFrame, &pCur->enmReturnType))
        {
            pCur->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
        }
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->rbp & fAddrMask);
        else if (RT_SUCCESS(rc))
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrFrame, pCtx->ss.Sel, pCtx->rbp & fAddrMask);

        /*
         * The first frame: resolve symbol/line info for the PC, then walk it.
         */
        if (RT_SUCCESS(rc))
        {
            if (DBGFADDRESS_IS_VALID(&pCur->AddrPC))
            {
                pCur->pSymPC = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pCur->AddrPC,
                                                     RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
                                                     NULL /*poffDisp*/, NULL /*phMod*/);
                pCur->pLinePC = DBGFR3AsLineByAddrA(pUVM, hAs, &pCur->AddrPC, NULL /*poffDisp*/, NULL /*phMod*/);
            }

            rc = dbgfR3StackWalk(&UnwindCtx, pCur, true /*fFirst*/);
        }
    }
    else
        pCur->enmReturnType = enmReturnType;
    if (RT_FAILURE(rc))
    {
        /* DBGFR3StackWalkEnd frees pCur and any symbol/line info attached so far. */
        DBGFR3StackWalkEnd(pCur);
        return rc;
    }

    /*
     * The other frames.  Walk into a stack copy ('Next') and only heap-clone
     * it into the chain once the walk succeeded.
     */
    DBGFSTACKFRAME Next = *pCur;
    while (!(pCur->fFlags & (DBGFSTACKFRAME_FLAGS_LAST | DBGFSTACKFRAME_FLAGS_MAX_DEPTH | DBGFSTACKFRAME_FLAGS_LOOP)))
    {
        /* try walk. */
        rc = dbgfR3StackWalk(&UnwindCtx, &Next, false /*fFirst*/);
        if (RT_FAILURE(rc))
            break;

        /* add the next frame to the chain. */
        PDBGFSTACKFRAME pNext = (PDBGFSTACKFRAME)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pNext));
        if (!pNext)
        {
            DBGFR3StackWalkEnd(pCur);
            return VERR_NO_MEMORY;
        }
        *pNext = Next;
        pCur->pNextInternal = pNext;
        pCur = pNext;
        Assert(pCur->pNextInternal == NULL);

        /* check for loop: a repeated frame address means we'd walk forever. */
        for (PCDBGFSTACKFRAME pLoop = pCur->pFirstInternal;
             pLoop && pLoop != pCur;
             pLoop = pLoop->pNextInternal)
            if (pLoop->AddrFrame.FlatPtr == pCur->AddrFrame.FlatPtr)
            {
                pCur->fFlags |= DBGFSTACKFRAME_FLAGS_LOOP;
                break;
            }

        /* check for insane recursion */
        if (pCur->iFrame >= 2048)
            pCur->fFlags |= DBGFSTACKFRAME_FLAGS_MAX_DEPTH;
    }

    *ppFirstFrame = pCur->pFirstInternal;
    return rc;
}
1170
1171
1172/**
1173 * Common worker for DBGFR3StackWalkBeginGuestEx, DBGFR3StackWalkBeginHyperEx,
1174 * DBGFR3StackWalkBeginGuest and DBGFR3StackWalkBeginHyper.
1175 */
1176static int dbgfR3StackWalkBeginCommon(PUVM pUVM,
1177 VMCPUID idCpu,
1178 DBGFCODETYPE enmCodeType,
1179 PCDBGFADDRESS pAddrFrame,
1180 PCDBGFADDRESS pAddrStack,
1181 PCDBGFADDRESS pAddrPC,
1182 DBGFRETURNTYPE enmReturnType,
1183 PCDBGFSTACKFRAME *ppFirstFrame)
1184{
1185 /*
1186 * Validate parameters.
1187 */
1188 *ppFirstFrame = NULL;
1189 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1190 PVM pVM = pUVM->pVM;
1191 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1192 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1193 if (pAddrFrame)
1194 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrFrame), VERR_INVALID_PARAMETER);
1195 if (pAddrStack)
1196 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrStack), VERR_INVALID_PARAMETER);
1197 if (pAddrPC)
1198 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrPC), VERR_INVALID_PARAMETER);
1199 AssertReturn(enmReturnType >= DBGFRETURNTYPE_INVALID && enmReturnType < DBGFRETURNTYPE_END, VERR_INVALID_PARAMETER);
1200
1201 /*
1202 * Get the CPUM context pointer and pass it on the specified EMT.
1203 */
1204 RTDBGAS hAs;
1205 PCCPUMCTX pCtx;
1206 switch (enmCodeType)
1207 {
1208 case DBGFCODETYPE_GUEST:
1209 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpuById(pVM, idCpu));
1210 hAs = DBGF_AS_GLOBAL;
1211 break;
1212 case DBGFCODETYPE_HYPER:
1213 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpuById(pVM, idCpu));
1214 hAs = DBGF_AS_RC_AND_GC_GLOBAL;
1215 break;
1216 case DBGFCODETYPE_RING0:
1217 pCtx = NULL; /* No valid context present. */
1218 hAs = DBGF_AS_R0;
1219 break;
1220 default:
1221 AssertFailedReturn(VERR_INVALID_PARAMETER);
1222 }
1223 return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3StackWalkCtxFull, 10,
1224 pUVM, idCpu, pCtx, hAs, enmCodeType,
1225 pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
1226}
1227
1228
1229/**
1230 * Begins a guest stack walk, extended version.
1231 *
1232 * This will walk the current stack, constructing a list of info frames which is
1233 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1234 * list and DBGFR3StackWalkEnd to release it.
1235 *
1236 * @returns VINF_SUCCESS on success.
1237 * @returns VERR_NO_MEMORY if we're out of memory.
1238 *
1239 * @param pUVM The user mode VM handle.
1240 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1241 * @param enmCodeType Code type
1242 * @param pAddrFrame Frame address to start at. (Optional)
1243 * @param pAddrStack Stack address to start at. (Optional)
1244 * @param pAddrPC Program counter to start at. (Optional)
1245 * @param enmReturnType The return address type. (Optional)
1246 * @param ppFirstFrame Where to return the pointer to the first info frame.
1247 */
1248VMMR3DECL(int) DBGFR3StackWalkBeginEx(PUVM pUVM,
1249 VMCPUID idCpu,
1250 DBGFCODETYPE enmCodeType,
1251 PCDBGFADDRESS pAddrFrame,
1252 PCDBGFADDRESS pAddrStack,
1253 PCDBGFADDRESS pAddrPC,
1254 DBGFRETURNTYPE enmReturnType,
1255 PCDBGFSTACKFRAME *ppFirstFrame)
1256{
1257 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
1258}
1259
1260
1261/**
1262 * Begins a guest stack walk.
1263 *
1264 * This will walk the current stack, constructing a list of info frames which is
1265 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1266 * list and DBGFR3StackWalkEnd to release it.
1267 *
1268 * @returns VINF_SUCCESS on success.
1269 * @returns VERR_NO_MEMORY if we're out of memory.
1270 *
1271 * @param pUVM The user mode VM handle.
1272 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1273 * @param enmCodeType Code type
1274 * @param ppFirstFrame Where to return the pointer to the first info frame.
1275 */
1276VMMR3DECL(int) DBGFR3StackWalkBegin(PUVM pUVM, VMCPUID idCpu, DBGFCODETYPE enmCodeType, PCDBGFSTACKFRAME *ppFirstFrame)
1277{
1278 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, NULL, NULL, NULL, DBGFRETURNTYPE_INVALID, ppFirstFrame);
1279}
1280
1281/**
1282 * Gets the next stack frame.
1283 *
1284 * @returns Pointer to the info for the next stack frame.
1285 * NULL if no more frames.
1286 *
1287 * @param pCurrent Pointer to the current stack frame.
1288 *
1289 */
1290VMMR3DECL(PCDBGFSTACKFRAME) DBGFR3StackWalkNext(PCDBGFSTACKFRAME pCurrent)
1291{
1292 return pCurrent
1293 ? pCurrent->pNextInternal
1294 : NULL;
1295}
1296
1297
1298/**
1299 * Ends a stack walk process.
1300 *
1301 * This *must* be called after a successful first call to any of the stack
1302 * walker functions. If not called we will leak memory or other resources.
1303 *
1304 * @param pFirstFrame The frame returned by one of the begin functions.
1305 */
1306VMMR3DECL(void) DBGFR3StackWalkEnd(PCDBGFSTACKFRAME pFirstFrame)
1307{
1308 if ( !pFirstFrame
1309 || !pFirstFrame->pFirstInternal)
1310 return;
1311
1312 PDBGFSTACKFRAME pFrame = (PDBGFSTACKFRAME)pFirstFrame->pFirstInternal;
1313 while (pFrame)
1314 {
1315 PDBGFSTACKFRAME pCur = pFrame;
1316 pFrame = (PDBGFSTACKFRAME)pCur->pNextInternal;
1317 if (pFrame)
1318 {
1319 if (pCur->pSymReturnPC == pFrame->pSymPC)
1320 pFrame->pSymPC = NULL;
1321 if (pCur->pSymReturnPC == pFrame->pSymReturnPC)
1322 pFrame->pSymReturnPC = NULL;
1323
1324 if (pCur->pSymPC == pFrame->pSymPC)
1325 pFrame->pSymPC = NULL;
1326 if (pCur->pSymPC == pFrame->pSymReturnPC)
1327 pFrame->pSymReturnPC = NULL;
1328
1329 if (pCur->pLineReturnPC == pFrame->pLinePC)
1330 pFrame->pLinePC = NULL;
1331 if (pCur->pLineReturnPC == pFrame->pLineReturnPC)
1332 pFrame->pLineReturnPC = NULL;
1333
1334 if (pCur->pLinePC == pFrame->pLinePC)
1335 pFrame->pLinePC = NULL;
1336 if (pCur->pLinePC == pFrame->pLineReturnPC)
1337 pFrame->pLineReturnPC = NULL;
1338 }
1339
1340 RTDbgSymbolFree(pCur->pSymPC);
1341 RTDbgSymbolFree(pCur->pSymReturnPC);
1342 RTDbgLineFree(pCur->pLinePC);
1343 RTDbgLineFree(pCur->pLineReturnPC);
1344
1345 pCur->pNextInternal = NULL;
1346 pCur->pFirstInternal = NULL;
1347 pCur->fFlags = 0;
1348 MMR3HeapFree(pCur);
1349 }
1350}
1351
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette