VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFStack.cpp@ 107227

Last change on this file since 107227 was 107227, checked in by vboxsync, 6 weeks ago

VMM: Cleaning up ARMv8 / x86 split. jiraref:VBP-1470

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 53.4 KB
Line 
1/* $Id: DBGFStack.cpp 107227 2024-12-04 15:20:14Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Call Stack Analyser.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DBGF
33#include <VBox/vmm/dbgf.h>
34#include <VBox/vmm/selm.h>
35#include <VBox/vmm/mm.h>
36#include "DBGFInternal.h"
37#include <VBox/vmm/vm.h>
38#include <VBox/vmm/uvm.h>
39#include <VBox/err.h>
40#include <VBox/log.h>
41#include <iprt/param.h>
42#include <iprt/assert.h>
43#include <iprt/alloca.h>
44#include <iprt/mem.h>
45#include <iprt/string.h>
46#include <iprt/formats/pecoff.h>
47
48
49/*********************************************************************************************************************************
50* Structures and Typedefs *
51*********************************************************************************************************************************/
52static DECLCALLBACK(int) dbgfR3StackReadCallback(PRTDBGUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst);
53
54/**
55 * Unwind context.
56 *
57 * @note Using a constructor and destructor here for simple+safe cleanup.
58 */
/**
 * Unwind context.
 *
 * Bundles the IPRT unwind state (RTDBGUNWINDSTATE) with the VM handles and a
 * one-entry module-mapping cache used by dbgfR3UnwindCtxDoOneFrame.
 *
 * @note Using a constructor and destructor here for simple+safe cleanup.
 */
typedef struct DBGFUNWINDCTX
{
    PUVM        m_pUVM;                 /**< The user mode VM handle. */
    VMCPUID     m_idCpu;                /**< The virtual CPU the stack belongs to. */
    RTDBGAS     m_hAs;                  /**< Resolved and retained address space; released by the destructor. */
    PCCPUMCTX   m_pInitialCtx;          /**< The initial register context, NULL for ring-0 unwinding. */
    bool        m_fIsHostRing0;         /**< Set when unwinding a host ring-0 stack (hAs == DBGF_AS_R0). */
    uint64_t    m_uOsScratch;           /**< For passing to DBGFOSREG::pfnStackUnwindAssist. */

    /** @name One-entry cache of the debug module mapped at the current PC.
     * @{ */
    RTDBGMOD    m_hCached;              /**< Cached module handle (retained), NIL_RTDBGMOD if empty. */
    RTUINTPTR   m_uCachedMapping;       /**< Mapping address of the cached module/segment. */
    RTUINTPTR   m_cbCachedMapping;      /**< Size of the cached mapping (0 invalidates the cache). */
    RTDBGSEGIDX m_idxCachedSegMapping;  /**< Segment index, NIL_RTDBGSEGIDX for flat image mapping. */
    /** @} */

    /** Register state consumed and updated by the IPRT frame unwinder. */
    RTDBGUNWINDSTATE m_State;

    /**
     * Initializes the unwind state from @a pInitialCtx when given, otherwise
     * (for DBGF_AS_R0) from the saved ring-0 stack state, and retains @a hAs.
     */
    DBGFUNWINDCTX(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pInitialCtx, RTDBGAS hAs)
    {
        m_State.u32Magic = RTDBGUNWINDSTATE_MAGIC;
#ifdef VBOX_VMM_TARGET_ARMV8
        m_State.enmArch = RTLDRARCH_ARM64;
#elif defined(VBOX_VMM_TARGET_X86)
        m_State.enmArch = RTLDRARCH_AMD64;
#else
# error "port me"
#endif
        m_State.pfnReadStack = dbgfR3StackReadCallback;
        m_State.pvUser = this;
        RT_ZERO(m_State.u);
        if (pInitialCtx)
        {
#ifdef VBOX_VMM_TARGET_ARMV8
            AssertCompile(RT_ELEMENTS(m_State.u.armv8.auGprs) == RT_ELEMENTS(pInitialCtx->aGRegs));

            m_State.uPc            = pInitialCtx->Pc.u64;
            m_State.u.armv8.uSpEl0 = pInitialCtx->aSpReg[0].u64;
            m_State.u.armv8.uSpEl1 = pInitialCtx->aSpReg[1].u64;

            for (uint32_t i = 0; i < RT_ELEMENTS(m_State.u.armv8.auGprs); i++)
                m_State.u.armv8.auGprs[i] = pInitialCtx->aGRegs[i].x;

#elif defined(VBOX_VMM_TARGET_X86)
            m_State.u.x86.auRegs[X86_GREG_xAX] = pInitialCtx->rax;
            m_State.u.x86.auRegs[X86_GREG_xCX] = pInitialCtx->rcx;
            m_State.u.x86.auRegs[X86_GREG_xDX] = pInitialCtx->rdx;
            m_State.u.x86.auRegs[X86_GREG_xBX] = pInitialCtx->rbx;
            m_State.u.x86.auRegs[X86_GREG_xSP] = pInitialCtx->rsp;
            m_State.u.x86.auRegs[X86_GREG_xBP] = pInitialCtx->rbp;
            m_State.u.x86.auRegs[X86_GREG_xSI] = pInitialCtx->rsi;
            m_State.u.x86.auRegs[X86_GREG_xDI] = pInitialCtx->rdi;
            m_State.u.x86.auRegs[X86_GREG_x8 ] = pInitialCtx->r8;
            m_State.u.x86.auRegs[X86_GREG_x9 ] = pInitialCtx->r9;
            m_State.u.x86.auRegs[X86_GREG_x10] = pInitialCtx->r10;
            m_State.u.x86.auRegs[X86_GREG_x11] = pInitialCtx->r11;
            m_State.u.x86.auRegs[X86_GREG_x12] = pInitialCtx->r12;
            m_State.u.x86.auRegs[X86_GREG_x13] = pInitialCtx->r13;
            m_State.u.x86.auRegs[X86_GREG_x14] = pInitialCtx->r14;
            m_State.u.x86.auRegs[X86_GREG_x15] = pInitialCtx->r15;
            m_State.uPc                        = pInitialCtx->rip;
            m_State.u.x86.uRFlags              = pInitialCtx->rflags.u;
            m_State.u.x86.auSegs[X86_SREG_ES]  = pInitialCtx->es.Sel;
            m_State.u.x86.auSegs[X86_SREG_CS]  = pInitialCtx->cs.Sel;
            m_State.u.x86.auSegs[X86_SREG_SS]  = pInitialCtx->ss.Sel;
            m_State.u.x86.auSegs[X86_SREG_DS]  = pInitialCtx->ds.Sel;
            m_State.u.x86.auSegs[X86_SREG_GS]  = pInitialCtx->gs.Sel;
            m_State.u.x86.auSegs[X86_SREG_FS]  = pInitialCtx->fs.Sel;
            m_State.u.x86.fRealOrV86           = CPUMIsGuestInRealOrV86ModeEx(pInitialCtx);
#endif
        }
        else if (hAs == DBGF_AS_R0)
            VMMR3InitR0StackUnwindState(pUVM, idCpu, &m_State);

        m_pUVM          = pUVM;
        m_idCpu         = idCpu;
        m_hAs           = DBGFR3AsResolveAndRetain(pUVM, hAs);
        m_pInitialCtx   = pInitialCtx;
        m_fIsHostRing0  = hAs == DBGF_AS_R0;
        m_uOsScratch    = 0;

        /* Start with an empty module-mapping cache. */
        m_hCached             = NIL_RTDBGMOD;
        m_uCachedMapping      = 0;
        m_cbCachedMapping     = 0;
        m_idxCachedSegMapping = NIL_RTDBGSEGIDX;
    }

    ~DBGFUNWINDCTX();

} DBGFUNWINDCTX;
/** Pointer to unwind context. */
typedef DBGFUNWINDCTX *PDBGFUNWINDCTX;
149
150
151static void dbgfR3UnwindCtxFlushCache(PDBGFUNWINDCTX pUnwindCtx)
152{
153 if (pUnwindCtx->m_hCached != NIL_RTDBGMOD)
154 {
155 RTDbgModRelease(pUnwindCtx->m_hCached);
156 pUnwindCtx->m_hCached = NIL_RTDBGMOD;
157 }
158 pUnwindCtx->m_cbCachedMapping = 0;
159 pUnwindCtx->m_idxCachedSegMapping = NIL_RTDBGSEGIDX;
160}
161
162
163DBGFUNWINDCTX::~DBGFUNWINDCTX()
164{
165 dbgfR3UnwindCtxFlushCache(this);
166 if (m_hAs != NIL_RTDBGAS)
167 {
168 RTDbgAsRelease(m_hAs);
169 m_hAs = NIL_RTDBGAS;
170 }
171}
172
173
/**
 * @interface_method_impl{RTDBGUNWINDSTATE,pfnReadStack}
 *
 * Reads stack memory for the IPRT unwinder, translating @a uSp into a
 * DBGFADDRESS according to the current mode (host ring-0, x86 segmented,
 * real/V86, or flat).
 */
static DECLCALLBACK(int) dbgfR3StackReadCallback(PRTDBGUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst)
{
#ifdef VBOX_VMM_TARGET_ARMV8
    Assert(pThis->enmArch == RTLDRARCH_ARM64);
#elif defined(VBOX_VMM_TARGET_X86)
    Assert(   pThis->enmArch == RTLDRARCH_AMD64
           || pThis->enmArch == RTLDRARCH_X86_32);
#else
# error "port me"
#endif

    /* The unwind context was stashed in pvUser by the DBGFUNWINDCTX constructor. */
    PDBGFUNWINDCTX pUnwindCtx = (PDBGFUNWINDCTX)pThis->pvUser;
    DBGFADDRESS    SrcAddr;
    int            rc = VINF_SUCCESS;
    if (pUnwindCtx->m_fIsHostRing0)
        DBGFR3AddrFromHostR0(&SrcAddr, uSp);
    else
    {
#ifdef VBOX_VMM_TARGET_X86
        /* 16-bit and 32-bit guests need SS to form the address; real/V86 mode
           uses the classic selector*16 translation instead of descriptor lookup. */
        if (   pThis->enmArch == RTLDRARCH_X86_32
            || pThis->enmArch == RTLDRARCH_X86_16)
        {
            if (!pThis->u.x86.fRealOrV86)
                rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pThis->u.x86.auSegs[X86_SREG_SS], uSp);
            else
                DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp + ((uint32_t)pThis->u.x86.auSegs[X86_SREG_SS] << 4));
        }
        else
            DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
#else
        DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
#endif
    }
    if (RT_SUCCESS(rc))
        rc = DBGFR3MemRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &SrcAddr, pvDst, cbToRead);
    if (RT_SUCCESS(rc))
        return rc;
    /* Negating the (negative) VERR_* status yields a positive value, which the
       caller treats as non-fatal — deliberate "keep going on read error". */
    return -rc; /* Ignore read errors. */
}
216
217
/**
 * Sets PC and SP.
 *
 * Copies the given addresses into the unwind register state; far x86
 * addresses also update the CS/SS selector registers.
 *
 * @returns true.
 * @param   pUnwindCtx      The unwind context.
 * @param   pAddrPC         The program counter (PC) value to set.
 * @param   pAddrStack      The stack pointer (SP) value to set.
 */
static bool dbgfR3UnwindCtxSetPcAndSp(PDBGFUNWINDCTX pUnwindCtx, PCDBGFADDRESS pAddrPC, PCDBGFADDRESS pAddrStack)
{
#ifdef VBOX_VMM_TARGET_ARMV8
    Assert(pUnwindCtx->m_State.enmArch == RTLDRARCH_ARM64);

    /* ARMv8 addresses are always flat. */
    Assert(!DBGFADDRESS_IS_FAR(pAddrPC));
    pUnwindCtx->m_State.uPc = pAddrPC->FlatPtr;
    Assert(!DBGFADDRESS_IS_FAR(pAddrStack));
    pUnwindCtx->m_State.u.armv8.uSpEl1 = pAddrStack->FlatPtr; /** @todo EL0 stack pointer. */

#elif defined(VBOX_VMM_TARGET_X86)
    Assert(   pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
           || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32);

    if (!DBGFADDRESS_IS_FAR(pAddrPC))
        pUnwindCtx->m_State.uPc = pAddrPC->FlatPtr;
    else
    {
        /* Far address: keep the selector so segmented reads work. */
        pUnwindCtx->m_State.uPc                       = pAddrPC->off;
        pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS] = pAddrPC->Sel;
    }
    if (!DBGFADDRESS_IS_FAR(pAddrStack))
        pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->FlatPtr;
    else
    {
        pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP] = pAddrStack->off;
        pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS]  = pAddrStack->Sel;
    }

#else
# error "port me"
#endif

    return true;
}
261
262
263/**
264 * Tries to unwind one frame using unwind info.
265 *
266 * @returns true on success, false on failure.
267 * @param pUnwindCtx The unwind context.
268 */
269static bool dbgfR3UnwindCtxDoOneFrame(PDBGFUNWINDCTX pUnwindCtx)
270{
271 /*
272 * Need to load it into the cache?
273 */
274 RTUINTPTR offCache = pUnwindCtx->m_State.uPc - pUnwindCtx->m_uCachedMapping;
275 if (offCache >= pUnwindCtx->m_cbCachedMapping)
276 {
277 RTDBGMOD hDbgMod = NIL_RTDBGMOD;
278 RTUINTPTR uBase = 0;
279 RTDBGSEGIDX idxSeg = NIL_RTDBGSEGIDX;
280 int rc = RTDbgAsModuleByAddr(pUnwindCtx->m_hAs, pUnwindCtx->m_State.uPc, &hDbgMod, &uBase, &idxSeg);
281 if (RT_SUCCESS(rc))
282 {
283 dbgfR3UnwindCtxFlushCache(pUnwindCtx);
284 pUnwindCtx->m_hCached = hDbgMod;
285 pUnwindCtx->m_uCachedMapping = uBase;
286 pUnwindCtx->m_idxCachedSegMapping = idxSeg;
287 pUnwindCtx->m_cbCachedMapping = idxSeg == NIL_RTDBGSEGIDX ? RTDbgModImageSize(hDbgMod)
288 : RTDbgModSegmentSize(hDbgMod, idxSeg);
289 offCache = pUnwindCtx->m_State.uPc - uBase;
290 }
291 else
292 return false;
293 }
294
295 /*
296 * Do the lookup.
297 */
298 AssertCompile(UINT32_MAX == NIL_RTDBGSEGIDX);
299 int rc = RTDbgModUnwindFrame(pUnwindCtx->m_hCached, pUnwindCtx->m_idxCachedSegMapping, offCache, &pUnwindCtx->m_State);
300 if (RT_SUCCESS(rc))
301 return true;
302 return false;
303}
304
305
306/**
307 * Read stack memory, will init entire buffer.
308 */
309DECLINLINE(int) dbgfR3StackRead(PUVM pUVM, VMCPUID idCpu, void *pvBuf, PCDBGFADDRESS pSrcAddr, size_t cb, size_t *pcbRead)
310{
311 int rc = DBGFR3MemRead(pUVM, idCpu, pSrcAddr, pvBuf, cb);
312 if (RT_FAILURE(rc))
313 {
314 /* fallback: byte by byte and zero the ones we fail to read. */
315 size_t cbRead;
316 for (cbRead = 0; cbRead < cb; cbRead++)
317 {
318 DBGFADDRESS Addr = *pSrcAddr;
319 rc = DBGFR3MemRead(pUVM, idCpu, DBGFR3AddrAdd(&Addr, cbRead), (uint8_t *)pvBuf + cbRead, 1);
320 if (RT_FAILURE(rc))
321 break;
322 }
323 if (cbRead)
324 rc = VINF_SUCCESS;
325 memset((char *)pvBuf + cbRead, 0, cb - cbRead);
326 *pcbRead = cbRead;
327 }
328 else
329 *pcbRead = cb;
330 return rc;
331}
332
#if !defined(VBOX_VMM_TARGET_ARMV8) /** @todo Unused on ARMv8 for now. */
/**
 * Collects sure registers on frame exit.
 *
 * "Sure" registers are those the unwind info says were restored for the
 * caller frame; they are copied into a freshly allocated pFrame->paSureRegs
 * array (freed by the frame-list cleanup, not here).
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
 * @param   pUVM        The user mode VM handle for the allocation.
 * @param   pFrame      The frame in question.
 * @param   pState      The unwind state.
 */
static int dbgfR3StackWalkCollectRegisterChanges(PUVM pUVM, PDBGFSTACKFRAME pFrame, PRTDBGUNWINDSTATE pState)
{
    pFrame->cSureRegs  = 0;
    pFrame->paSureRegs = NULL;

#if defined(VBOX_VMM_TARGET_ARMV8)
    /* NOTE(review): dead branch - the whole function is compiled out when
       VBOX_VMM_TARGET_ARMV8 is defined (see the !defined guard enclosing this
       function), so this arm cannot currently be compiled.  Presumably kept
       for when the function is re-enabled on ARMv8 - verify before relying
       on it. */
    if (pState->enmArch == RTLDRARCH_ARM64)
    {
        if (pState->u.armv8.Loaded.fAll)
        {
            /*
             * Count relevant registers.
             */
            uint32_t cRegs = 0;
            if (pState->u.armv8.Loaded.s.fRegs)
                for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.armv8.auGprs)); f <<= 1)
                    if (pState->u.armv8.Loaded.s.fRegs & f)
                        cRegs++;
            if (pState->u.armv8.Loaded.s.fSpEl0)
                cRegs++;
            if (pState->u.armv8.Loaded.s.fSpEl1)
                cRegs++;
            if (pState->u.armv8.Loaded.s.fPc)
                cRegs++;
            if (cRegs > 0)
            {
                /*
                 * Allocate the arrays.
                 */
                PDBGFREGVALEX paSureRegs = (PDBGFREGVALEX)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(DBGFREGVALEX) * cRegs);
                AssertReturn(paSureRegs, VERR_NO_MEMORY);
                pFrame->paSureRegs = paSureRegs;
                pFrame->cSureRegs  = cRegs;

                /*
                 * Populate the arrays.
                 */
                uint32_t iReg = 0;
                if (pState->u.armv8.Loaded.s.fRegs)
                    for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.armv8.auGprs); i++)
                        if (pState->u.armv8.Loaded.s.fRegs & RT_BIT(i))
                        {
                            paSureRegs[iReg].Value.u64 = pState->u.armv8.auGprs[i];
                            paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                            paSureRegs[iReg].enmReg    = (DBGFREG)(DBGFREG_ARMV8_GREG_X0 + i);
                            iReg++;
                        }

                if (iReg < cRegs)
                {
                    if (pState->u.armv8.Loaded.s.fSpEl0)
                    {
                        paSureRegs[iReg].Value.u64 = pState->u.armv8.uSpEl0;
                        paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                        paSureRegs[iReg].enmReg    = DBGFREG_ARMV8_SP_EL0;
                        iReg++;
                    }
                    if (pState->u.armv8.Loaded.s.fSpEl1)
                    {
                        paSureRegs[iReg].Value.u64 = pState->u.armv8.uSpEl1;
                        paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                        paSureRegs[iReg].enmReg    = DBGFREG_ARMV8_SP_EL1;
                        iReg++;
                    }
                    if (pState->u.armv8.Loaded.s.fPc)
                    {
                        paSureRegs[iReg].Value.u64 = pState->uPc;
                        paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                        paSureRegs[iReg].enmReg    = DBGFREG_ARMV8_PC;
                        iReg++;
                    }
                }
                Assert(iReg == cRegs);
            }
        }
    }
#else
    if (   pState->enmArch == RTLDRARCH_AMD64
        || pState->enmArch == RTLDRARCH_X86_32
        || pState->enmArch == RTLDRARCH_X86_16)
    {
        if (pState->u.x86.Loaded.fAll)
        {
            /*
             * Count relevant registers.
             */
            uint32_t cRegs = 0;
            if (pState->u.x86.Loaded.s.fRegs)
                for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auRegs)); f <<= 1)
                    if (pState->u.x86.Loaded.s.fRegs & f)
                        cRegs++;
            if (pState->u.x86.Loaded.s.fSegs)
                for (uint32_t f = 1; f < RT_BIT_32(RT_ELEMENTS(pState->u.x86.auSegs)); f <<= 1)
                    if (pState->u.x86.Loaded.s.fSegs & f)
                        cRegs++;
            if (pState->u.x86.Loaded.s.fRFlags)
                cRegs++;
            if (pState->u.x86.Loaded.s.fErrCd)
                cRegs++;
            if (cRegs > 0)
            {
                /*
                 * Allocate the arrays.
                 */
                PDBGFREGVALEX paSureRegs = (PDBGFREGVALEX)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(DBGFREGVALEX) * cRegs);
                AssertReturn(paSureRegs, VERR_NO_MEMORY);
                pFrame->paSureRegs = paSureRegs;
                pFrame->cSureRegs  = cRegs;

                /*
                 * Populate the arrays.
                 */
                uint32_t iReg = 0;
                if (pState->u.x86.Loaded.s.fRegs)
                    for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.x86.auRegs); i++)
                        if (pState->u.x86.Loaded.s.fRegs & RT_BIT(i))
                        {
                            paSureRegs[iReg].Value.u64 = pState->u.x86.auRegs[i];
                            paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                            paSureRegs[iReg].enmReg    = (DBGFREG)(DBGFREG_RAX + i);
                            iReg++;
                        }

                if (pState->u.x86.Loaded.s.fSegs)
                    for (uint32_t i = 0; i < RT_ELEMENTS(pState->u.x86.auSegs); i++)
                        if (pState->u.x86.Loaded.s.fSegs & RT_BIT(i))
                        {
                            paSureRegs[iReg].Value.u16 = pState->u.x86.auSegs[i];
                            paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U16;
                            switch (i)
                            {
                                case X86_SREG_ES: paSureRegs[iReg].enmReg = DBGFREG_ES; break;
                                case X86_SREG_CS: paSureRegs[iReg].enmReg = DBGFREG_CS; break;
                                case X86_SREG_SS: paSureRegs[iReg].enmReg = DBGFREG_SS; break;
                                case X86_SREG_DS: paSureRegs[iReg].enmReg = DBGFREG_DS; break;
                                case X86_SREG_FS: paSureRegs[iReg].enmReg = DBGFREG_FS; break;
                                case X86_SREG_GS: paSureRegs[iReg].enmReg = DBGFREG_GS; break;
                                default:          AssertFailedBreak();
                            }
                            iReg++;
                        }

                if (iReg < cRegs)
                {
                    if (pState->u.x86.Loaded.s.fRFlags)
                    {
                        paSureRegs[iReg].Value.u64 = pState->u.x86.uRFlags;
                        paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                        paSureRegs[iReg].enmReg    = DBGFREG_RFLAGS;
                        iReg++;
                    }
                    if (pState->u.x86.Loaded.s.fErrCd)
                    {
                        /* Trap error code has no DBGFREG enum value; exported by name instead. */
                        paSureRegs[iReg].Value.u64 = pState->u.x86.uErrCd;
                        paSureRegs[iReg].enmType   = DBGFREGVALTYPE_U64;
                        paSureRegs[iReg].enmReg    = DBGFREG_END;
                        paSureRegs[iReg].pszName   = "trap-errcd";
                        iReg++;
                    }
                }
                Assert(iReg == cRegs);
            }
        }
    }
#endif

    return VINF_SUCCESS;
}
#endif
511
512
/**
 * Internal worker routine.
 *
 * Walks one stack frame: advances to the frame described by the previous
 * frame's return members (unless @a fFirst), reads the raw frame memory,
 * fills in the return frame/stack/PC members, resolves the return PC
 * symbol+line, and finally tries unwind info to prime the next iteration.
 *
 * On aarch64 the typical stack frame layout is like this:
 *     ..  ..
 *      4  return address
 *      0  old fp; current fp points here
 *
 * On x86 the typical stack frame layout is like this:
 *     ..  ..
 *     16  parameter 2
 *     12  parameter 1
 *      8  parameter 0
 *      4  return address
 *      0  old ebp; current ebp points here
 *
 * @returns VBox status code; VERR_NO_MORE_FILES signals end of stack.
 * @param   pUnwindCtx  The unwind context.
 * @param   pFrame      The frame to process/fill.
 * @param   fFirst      Set for the first (innermost) frame.
 */
DECL_NO_INLINE(static, int) dbgfR3StackWalk(PDBGFUNWINDCTX pUnwindCtx, PDBGFSTACKFRAME pFrame, bool fFirst)
{
    /*
     * Stop if we got a read error in the previous run.
     */
    if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST)
        return VERR_NO_MORE_FILES;

    /*
     * Advance the frame (except for the first).
     */
    if (!fFirst) /** @todo we can probably eliminate this fFirst business... */
    {
        /* frame, pc and stack is taken from the existing frames return members. */
        pFrame->AddrFrame = pFrame->AddrReturnFrame;
        pFrame->AddrPC    = pFrame->AddrReturnPC;
        pFrame->pSymPC    = pFrame->pSymReturnPC;
        pFrame->pLinePC   = pFrame->pLineReturnPC;

        /* increment the frame number. */
        pFrame->iFrame++;

        /* UNWIND_INFO_RET -> USED_UNWIND; return type */
        if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET))
            pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
        else
        {
            pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
            pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
            if (pFrame->enmReturnFrameReturnType != RTDBGRETURNTYPE_INVALID)
            {
                pFrame->enmReturnType            = pFrame->enmReturnFrameReturnType;
                pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
            }
        }
        pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_TRAP_FRAME;
    }

    /*
     * Figure the return address size and use the old PC to guess stack item size.
     */
#ifdef VBOX_VMM_TARGET_ARMV8
    unsigned const cbRetAddr   = 8;
    unsigned const cbStackItem = 8; /** @todo AARCH32. */
    PVMCPUCC const pVCpu       = pUnwindCtx->m_pUVM->pVM->apCpusR3[pUnwindCtx->m_idCpu];

#elif defined(VBOX_VMM_TARGET_X86)
    /** @todo this is bogus... */
    unsigned cbRetAddr = RTDbgReturnTypeSize(pFrame->enmReturnType);
    unsigned cbStackItem;
    switch (pFrame->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
    {
        case DBGFADDRESS_FLAGS_FAR16: cbStackItem = 2; break;
        case DBGFADDRESS_FLAGS_FAR32: cbStackItem = 4; break;
        case DBGFADDRESS_FLAGS_FAR64: cbStackItem = 8; break;
        case DBGFADDRESS_FLAGS_RING0: cbStackItem = sizeof(RTHCUINTPTR); break;
        default:
            /* Flat address: derive the stack item width from the return type instead. */
            switch (pFrame->enmReturnType)
            {
                case RTDBGRETURNTYPE_FAR16:
                case RTDBGRETURNTYPE_IRET16:
                case RTDBGRETURNTYPE_IRET32_V86:
                case RTDBGRETURNTYPE_NEAR16: cbStackItem = 2; break;

                case RTDBGRETURNTYPE_FAR32:
                case RTDBGRETURNTYPE_IRET32:
                case RTDBGRETURNTYPE_IRET32_PRIV:
                case RTDBGRETURNTYPE_NEAR32: cbStackItem = 4; break;

                case RTDBGRETURNTYPE_FAR64:
                case RTDBGRETURNTYPE_IRET64:
                case RTDBGRETURNTYPE_NEAR64: cbStackItem = 8; break;

                default:
                    AssertMsgFailed(("%d\n", pFrame->enmReturnType));
                    cbStackItem = 4;
                    break;
            }
    }
#endif

    /*
     * Read the raw frame data.
     * We double cbRetAddr in case we have a far return.
     *
     * Layout of the buffer: [old frame ptr][return address][arguments].
     */
    union
    {
        uint64_t *pu64;
        uint32_t *pu32;
        uint16_t *pu16;
        uint8_t  *pb;
        void     *pv;
    } u, uRet, uArgs, uBp;
    size_t cbRead = cbRetAddr * 2 + cbStackItem + sizeof(pFrame->Args);
    u.pv = alloca(cbRead);
    uBp = u;
    uRet.pb  = u.pb + cbStackItem;
    uArgs.pb = u.pb + cbStackItem + cbRetAddr;

    Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
    int rc = dbgfR3StackRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, u.pv, &pFrame->AddrFrame, cbRead, &cbRead);
    if (   RT_FAILURE(rc)
        || cbRead < cbRetAddr + cbStackItem)
        pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_LAST; /* Can't go further; this is the last frame we report. */

    /*
     * Return Frame address.
     *
     * If we used unwind info to get here, the unwind register context will be
     * positioned after the return instruction has been executed. We start by
     * picking up the rBP register here for return frame and will try improve
     * on it further down by using unwind info.
     */
    pFrame->AddrReturnFrame = pFrame->AddrFrame;
    if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
    {
#ifdef VBOX_VMM_TARGET_ARMV8
        AssertFailed(); /** @todo */

#elif defined(VBOX_VMM_TARGET_X86)
        if (   pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
            || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
            DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnFrame,
                                 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
        else if (pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_V86)
            DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnFrame,
                               ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
                               + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP]);
        else
        {
            pFrame->AddrReturnFrame.off      = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP];
            pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
        }
#endif /* VBOX_VMM_TARGET_X86 */
    }
    else
    {
        /* No unwind info: the saved frame pointer is the first item in the frame. */
        switch (cbStackItem)
        {
#ifdef VBOX_VMM_TARGET_ARMV8
            case 8:     pFrame->AddrReturnFrame.off = CPUMGetGCPtrPacStripped(pVCpu, *uBp.pu64); break;
#else
            case 8:     pFrame->AddrReturnFrame.off = *uBp.pu64; break;
#endif
            case 4:     pFrame->AddrReturnFrame.off = *uBp.pu32; break;
#ifdef VBOX_VMM_TARGET_X86
            case 2:     pFrame->AddrReturnFrame.off = *uBp.pu16; break;
#endif
            default:    AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
        }

#ifdef VBOX_VMM_TARGET_X86
        /* Watcom tries to keep the frame pointer odd for far returns. */
        if (   cbStackItem <= 4
            && !(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
        {
            if (pFrame->AddrReturnFrame.off & 1)
            {
                pFrame->AddrReturnFrame.off &= ~(RTGCUINTPTR)1;
                if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR16)
                {
                    pFrame->fFlags       |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
                    pFrame->enmReturnType = RTDBGRETURNTYPE_FAR16;
                    cbRetAddr = 4;
                }
                else if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR32)
                {
# if 1
                    /* Assumes returning 32-bit code. */
                    pFrame->fFlags       |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
                    pFrame->enmReturnType = RTDBGRETURNTYPE_FAR32;
                    cbRetAddr = 8;
# else
                    /* Assumes returning 16-bit code. */
                    pFrame->fFlags       |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
                    pFrame->enmReturnType = RTDBGRETURNTYPE_FAR16;
                    cbRetAddr = 4;
# endif
                }
            }
            else if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN)
            {
                /* Frame pointer is even again: undo the far-return adjustment. */
                if (pFrame->enmReturnType == RTDBGRETURNTYPE_FAR16)
                {
                    pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR16;
                    cbRetAddr = 2;
                }
                else if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR32)
                {
                    /* NOTE(review): asymmetric with the branch above, which sets
                       FAR32 from NEAR32 - one would expect FAR32 -> NEAR32 here.
                       Verify against upstream history before changing. */
                    pFrame->enmReturnType = RTDBGRETURNTYPE_FAR32;
                    cbRetAddr = 4;
                }
                pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
            }
            uArgs.pb = u.pb + cbStackItem + cbRetAddr;
        }
#endif /* VBOX_VMM_TARGET_X86 */

        pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
    }

    /*
     * Return Stack Address.
     */
    pFrame->AddrReturnStack = pFrame->AddrReturnFrame;
    if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
    {
#ifdef VBOX_VMM_TARGET_ARMV8
        AssertFailed();

#elif defined(VBOX_VMM_TARGET_X86)
        if (   pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
            || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
            DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnStack,
                                 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS], pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
        else if (pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_V86)
            DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnStack,
                               ((uint32_t)pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] << 4)
                               + pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP]);
        else
        {
            pFrame->AddrReturnStack.off      = pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xSP];
            pFrame->AddrReturnStack.FlatPtr += pFrame->AddrReturnStack.off - pFrame->AddrStack.off;
        }
#endif /* VBOX_VMM_TARGET_X86 */
    }
    else
    {
        /* Without unwind info the caller's SP is just past the saved BP + return address. */
        pFrame->AddrReturnStack.off     += cbStackItem + cbRetAddr;
        pFrame->AddrReturnStack.FlatPtr += cbStackItem + cbRetAddr;
    }

    /*
     * Return PC.
     */
    pFrame->AddrReturnPC = pFrame->AddrPC;
    if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
    {
#ifdef VBOX_VMM_TARGET_ARMV8
        AssertFailed();

#elif defined(VBOX_VMM_TARGET_X86)
        if (RTDbgReturnTypeIsNear(pFrame->enmReturnType))
        {
            pFrame->AddrReturnPC.off      = pUnwindCtx->m_State.uPc;
            pFrame->AddrReturnPC.FlatPtr += pFrame->AddrReturnPC.off - pFrame->AddrPC.off;
        }
        else
            DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC,
                                 pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS], pUnwindCtx->m_State.uPc);
#endif
    }
    else
    {
        /* Decode the return address we read from the stack according to the return type. */
#ifdef VBOX_VMM_TARGET_ARMV8
        switch (pFrame->enmReturnType)
        {
            case RTDBGRETURNTYPE_NEAR64:
                if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
                {
                    pFrame->AddrReturnPC.FlatPtr += CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64) - pFrame->AddrReturnPC.off;
                    pFrame->AddrReturnPC.off      = CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64);
                }
                else
                    DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64));
                break;
            default:
                AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
                return VERR_INVALID_PARAMETER;
        }

#elif defined(VBOX_VMM_TARGET_X86)
        int rc2;
        switch (pFrame->enmReturnType)
        {
            case RTDBGRETURNTYPE_NEAR16:
                if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
                {
                    pFrame->AddrReturnPC.FlatPtr += *uRet.pu16 - pFrame->AddrReturnPC.off;
                    pFrame->AddrReturnPC.off      = *uRet.pu16;
                }
                else
                    DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu16);
                break;
            case RTDBGRETURNTYPE_NEAR32:
                if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
                {
                    pFrame->AddrReturnPC.FlatPtr += *uRet.pu32 - pFrame->AddrReturnPC.off;
                    pFrame->AddrReturnPC.off      = *uRet.pu32;
                }
                else
                    DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu32);
                break;
            case RTDBGRETURNTYPE_NEAR64:
                if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
                {
                    pFrame->AddrReturnPC.FlatPtr += *uRet.pu64 - pFrame->AddrReturnPC.off;
                    pFrame->AddrReturnPC.off      = *uRet.pu64;
                }
                else
                    DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, *uRet.pu64);
                break;
            case RTDBGRETURNTYPE_FAR16:
                /* If the far sel:off doesn't resolve, retry as a near return with the current CS. */
                rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
                if (RT_SUCCESS(rc2))
                    break;
                rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, pFrame->AddrPC.Sel, uRet.pu16[0]);
                if (RT_SUCCESS(rc2))
                    pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR16;
                else
                    DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
                break;
            case RTDBGRETURNTYPE_FAR32:
                /* Same fall-back-to-near strategy as FAR16 above. */
                rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
                if (RT_SUCCESS(rc2))
                    break;
                rc2 = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, pFrame->AddrPC.Sel, uRet.pu32[0]);
                if (RT_SUCCESS(rc2))
                    pFrame->enmReturnType = RTDBGRETURNTYPE_NEAR32;
                else
                    DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
                break;
            case RTDBGRETURNTYPE_FAR64:
                DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
                break;
            case RTDBGRETURNTYPE_IRET16:
                DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[1], uRet.pu16[0]);
                break;
            case RTDBGRETURNTYPE_IRET32:
                DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
                break;
            case RTDBGRETURNTYPE_IRET32_PRIV:
                DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
                break;
            case RTDBGRETURNTYPE_IRET32_V86:
                DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[2], uRet.pu32[0]);
                break;
            case RTDBGRETURNTYPE_IRET64:
                DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC, uRet.pu16[4], uRet.pu64[0]);
                break;
            default:
                AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
                return VERR_INVALID_PARAMETER;
        }
#endif /* VBOX_VMM_TARGET_X86 */
    }


    /* Resolve symbol and line info for the return PC (allocated; owned by the frame). */
    pFrame->pSymReturnPC  = DBGFR3AsSymbolByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
                                                  RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
                                                  NULL /*poffDisp*/, NULL /*phMod*/);
    pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
                                                NULL /*poffDisp*/, NULL /*phMod*/);

    /*
     * Frame bitness flag.
     */
    /** @todo use previous return type for this? */
    pFrame->fFlags &= ~(DBGFSTACKFRAME_FLAGS_16BIT | DBGFSTACKFRAME_FLAGS_32BIT | DBGFSTACKFRAME_FLAGS_64BIT);
    switch (cbStackItem)
    {
        case 4: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
        case 8: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
#ifdef VBOX_VMM_TARGET_X86
        case 2: pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_16BIT; break;
#endif
        default: AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
    }

    /*
     * The arguments.
     */
    memcpy(&pFrame->Args, uArgs.pv, sizeof(pFrame->Args));

    /*
     * Collect register changes.
     * Then call the OS layer to assist us (e.g. NT trap frames).
     */
    if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
    {
#if defined(VBOX_VMM_TARGET_X86)
        rc = dbgfR3StackWalkCollectRegisterChanges(pUnwindCtx->m_pUVM, pFrame, &pUnwindCtx->m_State);
        if (RT_FAILURE(rc))
            return rc;

        if (   pUnwindCtx->m_pInitialCtx
            && pUnwindCtx->m_hAs != NIL_RTDBGAS)
        {
            rc = dbgfR3OSStackUnwindAssist(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, pFrame, &pUnwindCtx->m_State,
                                           pUnwindCtx->m_pInitialCtx, pUnwindCtx->m_hAs, &pUnwindCtx->m_uOsScratch);
            if (RT_FAILURE(rc))
                return rc;
        }
#else
        AssertFailed();
#endif
    }

    /*
     * Try use unwind information to locate the return frame pointer (for the
     * next loop iteration).
     */
    Assert(!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET));
    pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
    if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST))
    {
        /* Set PC and SP if we didn't unwind our way here (context will then point
           and the return PC and SP already). */
        if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
        {
            dbgfR3UnwindCtxSetPcAndSp(pUnwindCtx, &pFrame->AddrReturnPC, &pFrame->AddrReturnStack);
#ifdef VBOX_VMM_TARGET_ARMV8
            pUnwindCtx->m_State.u.armv8.auGprs[ARMV8_A64_REG_BP] = pFrame->AddrReturnFrame.off;
#elif defined(VBOX_VMM_TARGET_X86)
            pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP] = pFrame->AddrReturnFrame.off;
#endif
        }

        /* Mark all unwind registers as stale before the next unwind step. */
#ifdef VBOX_VMM_TARGET_ARMV8
        if (pUnwindCtx->m_State.enmArch == RTLDRARCH_ARM64)
            pUnwindCtx->m_State.u.armv8.Loaded.fAll = 0;
        else
            AssertFailed();

#elif defined(VBOX_VMM_TARGET_X86)
        /** @todo Reevaluate CS if the previous frame return type isn't near. */
        if (   pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
            || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32
            || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_16)
            pUnwindCtx->m_State.u.x86.Loaded.fAll = 0;
        else
            AssertFailed();
#endif

        if (dbgfR3UnwindCtxDoOneFrame(pUnwindCtx))
        {
#ifdef VBOX_VMM_TARGET_ARMV8
            Assert(!pUnwindCtx->m_fIsHostRing0);
#elif defined(VBOX_VMM_TARGET_X86)
            if (pUnwindCtx->m_fIsHostRing0)
                DBGFR3AddrFromHostR0(&pFrame->AddrReturnFrame, pUnwindCtx->m_State.u.x86.FrameAddr.off);
            else
#endif
            {
                DBGFADDRESS AddrReturnFrame = pFrame->AddrReturnFrame;
#ifdef VBOX_VMM_TARGET_ARMV8
                DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &AddrReturnFrame, pUnwindCtx->m_State.u.armv8.FrameAddr);
#elif defined(VBOX_VMM_TARGET_X86)
                rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &AddrReturnFrame,
                                          pUnwindCtx->m_State.u.x86.FrameAddr.sel, pUnwindCtx->m_State.u.x86.FrameAddr.off);
                if (RT_SUCCESS(rc))
#endif
                    pFrame->AddrReturnFrame = AddrReturnFrame;
            }
            pFrame->enmReturnFrameReturnType = pUnwindCtx->m_State.enmRetType;
            pFrame->fFlags                  |= DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
        }
    }

    return VINF_SUCCESS;
}
990
991
/**
 * Walks the entire stack allocating memory as we walk.
 *
 * This is the EMT worker behind the stack walk begin functions; it is invoked
 * on the EMT of @a idCpu (via VMR3ReqPriorityCallWaitU) so the CPU context can
 * be read safely.  It builds a linked list of frames which the caller releases
 * with DBGFR3StackWalkEnd.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The ID of the virtual CPU whose stack to walk.
 * @param   pCtx            The CPU context to start from.  NULL for ring-0
 *                          walks (no valid guest context present).
 * @param   hAs             The address space to use for symbol and line lookup.
 * @param   enmCodeType     The code type (guest, hyper or ring-0).
 * @param   pAddrFrame      Frame address to start at.  Optional.
 * @param   pAddrStack      Stack address to start at.  Optional.
 * @param   pAddrPC         Program counter to start at.  Optional.
 * @param   penmReturnType  The return address type, passed by pointer rather
 *                          than by value (darwin/arm64 fun, see @bugref{10725}).
 * @param   ppFirstFrame    Where to return the pointer to the first info frame.
 */
static DECLCALLBACK(int) dbgfR3StackWalkCtxFull(PUVM pUVM, VMCPUID idCpu, PCCPUMCTX pCtx, RTDBGAS hAs,
                                                DBGFCODETYPE enmCodeType,
                                                PCDBGFADDRESS pAddrFrame,
                                                PCDBGFADDRESS pAddrStack,
                                                PCDBGFADDRESS pAddrPC,
                                                RTDBGRETURNTYPE const *penmReturnType,
                                                PCDBGFSTACKFRAME *ppFirstFrame)
{
    RTDBGRETURNTYPE const enmReturnType = *penmReturnType; /* darwin/arm64 fun, see @bugref{10725} */
    DBGFUNWINDCTX UnwindCtx(pUVM, idCpu, pCtx, hAs);

    /* alloc first frame. */
    PDBGFSTACKFRAME pCur = (PDBGFSTACKFRAME)MMR3HeapAllocZU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pCur));
    if (!pCur)
        return VERR_NO_MEMORY;

    /*
     * Initialize the frame.
     */
    pCur->pNextInternal = NULL;
    pCur->pFirstInternal = pCur;

    /* Establish the starting PC: caller supplied address takes precedence,
       otherwise it is taken from the CPU context. */
    int rc = VINF_SUCCESS;
    if (pAddrPC)
        pCur->AddrPC = *pAddrPC;
#ifdef VBOX_VMM_TARGET_ARMV8
    else
        DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->Pc.u64);
#elif defined(VBOX_VMM_TARGET_X86)
    else if (enmCodeType != DBGFCODETYPE_GUEST)
        DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->rip);
    else
        rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrPC, pCtx->cs.Sel, pCtx->rip);
#endif
    if (RT_SUCCESS(rc))
    {
        /*
         * Determine the effective address width mask and, where the caller
         * didn't specify one, a default return type derived from the code
         * type, the PC address flavor and the current CPU mode.
         */
        uint64_t fAddrMask;
        if (enmCodeType == DBGFCODETYPE_RING0)
            fAddrMask = HC_ARCH_BITS == 64 ? UINT64_MAX : UINT32_MAX;
        else if (enmCodeType == DBGFCODETYPE_HYPER)
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR16(&pCur->AddrPC))
            fAddrMask = UINT16_MAX;
        else if (DBGFADDRESS_IS_FAR32(&pCur->AddrPC))
            fAddrMask = UINT32_MAX;
        else if (DBGFADDRESS_IS_FAR64(&pCur->AddrPC))
            fAddrMask = UINT64_MAX;
        else
        {
            /* Flat guest address: consult the CPU mode. */
            PVMCPU const   pVCpu      = pUVM->pVM->apCpusR3[idCpu];
            CPUMMODE const enmCpuMode = CPUMGetGuestMode(pVCpu);

#ifdef VBOX_VMM_TARGET_ARMV8
            /** @todo */
            Assert(enmCpuMode == CPUMMODE_ARMV8_AARCH64); RT_NOREF(enmCpuMode);
            fAddrMask = UINT64_MAX;
            if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64;

#elif defined(VBOX_VMM_TARGET_X86)
            if (enmCpuMode == CPUMMODE_REAL)
            {
                fAddrMask = UINT16_MAX;
                if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR16;
            }
            else if (   enmCpuMode == CPUMMODE_PROTECTED
                     || !CPUMIsGuestIn64BitCode(pVCpu))
            {
                fAddrMask = UINT32_MAX;
                if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32;
            }
            else
            {
                fAddrMask = UINT64_MAX;
                if (enmReturnType == RTDBGRETURNTYPE_INVALID)
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64;
            }
#endif
        }

#ifdef VBOX_VMM_TARGET_X86
        /* On x86 a still-unspecified return type is derived from the PC
           address type (far16/far32/far64/ring-0). */
        if (enmReturnType == RTDBGRETURNTYPE_INVALID)
            switch (pCur->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
            {
                case DBGFADDRESS_FLAGS_FAR16: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR16; break;
                case DBGFADDRESS_FLAGS_FAR32: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32; break;
                case DBGFADDRESS_FLAGS_FAR64: pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64; break;
                case DBGFADDRESS_FLAGS_RING0:
                    pCur->enmReturnType = HC_ARCH_BITS == 64 ? RTDBGRETURNTYPE_NEAR64 : RTDBGRETURNTYPE_NEAR32;
                    break;
                default:
                    pCur->enmReturnType = RTDBGRETURNTYPE_NEAR32;
                    break;
            }
#endif


        /* Starting stack address: caller override or CPU context. */
        if (pAddrStack)
            pCur->AddrStack = *pAddrStack;
#ifdef VBOX_VMM_TARGET_ARMV8
        else
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->aSpReg[1].u64 & fAddrMask); /** @todo EL0 stack. */
#elif defined(VBOX_VMM_TARGET_X86)
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->rsp & fAddrMask);
        else
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrStack, pCtx->ss.Sel, pCtx->rsp & fAddrMask);
#endif

        /* Starting frame address: caller override or base pointer register. */
        Assert(!(pCur->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO));
        if (pAddrFrame)
            pCur->AddrFrame = *pAddrFrame;
#ifdef VBOX_VMM_TARGET_ARMV8
        else
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->aGRegs[ARMV8_A64_REG_BP].x & fAddrMask);
#elif defined(VBOX_VMM_TARGET_X86)
        else if (enmCodeType != DBGFCODETYPE_GUEST)
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->rbp & fAddrMask);
        else if (RT_SUCCESS(rc))
            rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrFrame, pCtx->ss.Sel, pCtx->rbp & fAddrMask);
#endif

        /*
         * Try unwind and get a better frame pointer and state.
         */
        if (   RT_SUCCESS(rc)
            && dbgfR3UnwindCtxSetPcAndSp(&UnwindCtx, &pCur->AddrPC, &pCur->AddrStack)
            && dbgfR3UnwindCtxDoOneFrame(&UnwindCtx))
        {
            pCur->enmReturnType = UnwindCtx.m_State.enmRetType;
            pCur->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
#ifdef VBOX_VMM_TARGET_ARMV8
            DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, UnwindCtx.m_State.u.armv8.FrameAddr);
#elif defined(VBOX_VMM_TARGET_X86)
            if (!UnwindCtx.m_fIsHostRing0)
                rc = DBGFR3AddrFromSelOff(UnwindCtx.m_pUVM, UnwindCtx.m_idCpu, &pCur->AddrFrame,
                                          UnwindCtx.m_State.u.x86.FrameAddr.sel, UnwindCtx.m_State.u.x86.FrameAddr.off);
            else
                DBGFR3AddrFromHostR0(&pCur->AddrFrame, UnwindCtx.m_State.u.x86.FrameAddr.off);
#endif
        }
        /*
         * The first frame.
         */
        if (RT_SUCCESS(rc))
        {
            if (DBGFADDRESS_IS_VALID(&pCur->AddrPC))
            {
                pCur->pSymPC  = DBGFR3AsSymbolByAddrA(pUVM, hAs, &pCur->AddrPC,
                                                      RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
                                                      NULL /*poffDisp*/, NULL /*phMod*/);
                pCur->pLinePC = DBGFR3AsLineByAddrA(pUVM, hAs, &pCur->AddrPC, NULL /*poffDisp*/, NULL /*phMod*/);
            }

            rc = dbgfR3StackWalk(&UnwindCtx, pCur, true /*fFirst*/);
        }
    }
    else
        pCur->enmReturnType = enmReturnType;
    if (RT_FAILURE(rc))
    {
        DBGFR3StackWalkEnd(pCur);
        return rc;
    }

    /*
     * The other frames.
     * Note! We work on a stack copy ('Next') and only heap-allocate a frame
     *       once the walk for it has succeeded.
     */
    DBGFSTACKFRAME Next = *pCur;
    while (!(pCur->fFlags & (DBGFSTACKFRAME_FLAGS_LAST | DBGFSTACKFRAME_FLAGS_MAX_DEPTH | DBGFSTACKFRAME_FLAGS_LOOP)))
    {
        Next.cSureRegs  = 0;
        Next.paSureRegs = NULL;

        /* try walk. */
        rc = dbgfR3StackWalk(&UnwindCtx, &Next, false /*fFirst*/);
        if (RT_FAILURE(rc))
            break;

        /* add the next frame to the chain. */
        PDBGFSTACKFRAME pNext = (PDBGFSTACKFRAME)MMR3HeapAllocU(pUVM, MM_TAG_DBGF_STACK, sizeof(*pNext));
        if (!pNext)
        {
            DBGFR3StackWalkEnd(pCur);
            return VERR_NO_MEMORY;
        }
        *pNext = Next;
        pCur->pNextInternal = pNext;
        pCur = pNext;
        Assert(pCur->pNextInternal == NULL);

        /* check for loop */
        for (PCDBGFSTACKFRAME pLoop = pCur->pFirstInternal;
             pLoop && pLoop != pCur;
             pLoop = pLoop->pNextInternal)
            if (pLoop->AddrFrame.FlatPtr == pCur->AddrFrame.FlatPtr)
            {
                pCur->fFlags |= DBGFSTACKFRAME_FLAGS_LOOP;
                break;
            }

        /* check for insane recursion */
        if (pCur->iFrame >= 2048)
            pCur->fFlags |= DBGFSTACKFRAME_FLAGS_MAX_DEPTH;
    }

    *ppFirstFrame = pCur->pFirstInternal;
    return rc;
}
1206
1207
/**
 * Common worker for DBGFR3StackWalkBeginGuestEx, DBGFR3StackWalkBeginHyperEx,
 * DBGFR3StackWalkBeginGuest and DBGFR3StackWalkBeginHyper.
 *
 * Validates the input, resolves the CPU context pointer and address space for
 * the requested code type, and hands the actual walking over to
 * dbgfR3StackWalkCtxFull on the EMT of @a idCpu.
 *
 * @returns VBox status code (VERR_NO_MEMORY, VERR_INVALID_PARAMETER, ...).
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The ID of the virtual CPU whose stack to walk.
 * @param   enmCodeType     The code type (guest, hyper or ring-0).
 * @param   pAddrFrame      Frame address to start at.  Optional.
 * @param   pAddrStack      Stack address to start at.  Optional.
 * @param   pAddrPC         Program counter to start at.  Optional.
 * @param   enmReturnType   The return address type, or RTDBGRETURNTYPE_INVALID
 *                          to have it determined from the context.
 * @param   ppFirstFrame    Where to return the pointer to the first info
 *                          frame.  Set to NULL on failure.
 */
static int dbgfR3StackWalkBeginCommon(PUVM pUVM,
                                      VMCPUID idCpu,
                                      DBGFCODETYPE enmCodeType,
                                      PCDBGFADDRESS pAddrFrame,
                                      PCDBGFADDRESS pAddrStack,
                                      PCDBGFADDRESS pAddrPC,
                                      RTDBGRETURNTYPE enmReturnType,
                                      PCDBGFSTACKFRAME *ppFirstFrame)
{
    /*
     * Validate parameters.
     */
    *ppFirstFrame = NULL;
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
    if (pAddrFrame)
        AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrFrame), VERR_INVALID_PARAMETER);
    if (pAddrStack)
        AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrStack), VERR_INVALID_PARAMETER);
    if (pAddrPC)
        AssertReturn(DBGFR3AddrIsValid(pUVM, pAddrPC), VERR_INVALID_PARAMETER);
    AssertReturn(enmReturnType >= RTDBGRETURNTYPE_INVALID && enmReturnType < RTDBGRETURNTYPE_END, VERR_INVALID_PARAMETER);

    /*
     * Get the CPUM context pointer and pass it on the specified EMT.
     */
    RTDBGAS     hAs;
    PCCPUMCTX   pCtx;
    switch (enmCodeType)
    {
        case DBGFCODETYPE_GUEST:
            pCtx = CPUMQueryGuestCtxPtr(pVM->apCpusR3[idCpu]);
            hAs  = DBGF_AS_GLOBAL;
            break;
        case DBGFCODETYPE_HYPER:
            pCtx = CPUMQueryGuestCtxPtr(pVM->apCpusR3[idCpu]);
            hAs  = DBGF_AS_RC_AND_GC_GLOBAL;
            break;
        case DBGFCODETYPE_RING0:
            pCtx = NULL;    /* No valid context present. */
            hAs  = DBGF_AS_R0;
            break;
        default:
            AssertFailedReturn(VERR_INVALID_PARAMETER);
    }
    /* Note! enmReturnType is passed by pointer (see the dereference in
             dbgfR3StackWalkCtxFull) - darwin/arm64 fun, see @bugref{10725}.
             10 extra arguments, all pointer sized. */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3StackWalkCtxFull, 10 | VMREQ_F_EXTRA_ARGS_ALL_PTRS,
                                    pUVM, idCpu, pCtx, hAs, enmCodeType, pAddrFrame, pAddrStack, pAddrPC,
                                    &enmReturnType, ppFirstFrame);
}
1263
1264
1265/**
1266 * Begins a guest stack walk, extended version.
1267 *
1268 * This will walk the current stack, constructing a list of info frames which is
1269 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1270 * list and DBGFR3StackWalkEnd to release it.
1271 *
1272 * @returns VINF_SUCCESS on success.
1273 * @returns VERR_NO_MEMORY if we're out of memory.
1274 *
1275 * @param pUVM The user mode VM handle.
1276 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1277 * @param enmCodeType Code type
1278 * @param pAddrFrame Frame address to start at. (Optional)
1279 * @param pAddrStack Stack address to start at. (Optional)
1280 * @param pAddrPC Program counter to start at. (Optional)
1281 * @param enmReturnType The return address type. (Optional)
1282 * @param ppFirstFrame Where to return the pointer to the first info frame.
1283 */
1284VMMR3DECL(int) DBGFR3StackWalkBeginEx(PUVM pUVM,
1285 VMCPUID idCpu,
1286 DBGFCODETYPE enmCodeType,
1287 PCDBGFADDRESS pAddrFrame,
1288 PCDBGFADDRESS pAddrStack,
1289 PCDBGFADDRESS pAddrPC,
1290 RTDBGRETURNTYPE enmReturnType,
1291 PCDBGFSTACKFRAME *ppFirstFrame)
1292{
1293 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, pAddrFrame, pAddrStack, pAddrPC, enmReturnType, ppFirstFrame);
1294}
1295
1296
1297/**
1298 * Begins a guest stack walk.
1299 *
1300 * This will walk the current stack, constructing a list of info frames which is
1301 * returned to the caller. The caller uses DBGFR3StackWalkNext to traverse the
1302 * list and DBGFR3StackWalkEnd to release it.
1303 *
1304 * @returns VINF_SUCCESS on success.
1305 * @returns VERR_NO_MEMORY if we're out of memory.
1306 *
1307 * @param pUVM The user mode VM handle.
1308 * @param idCpu The ID of the virtual CPU which stack we want to walk.
1309 * @param enmCodeType Code type
1310 * @param ppFirstFrame Where to return the pointer to the first info frame.
1311 */
1312VMMR3DECL(int) DBGFR3StackWalkBegin(PUVM pUVM, VMCPUID idCpu, DBGFCODETYPE enmCodeType, PCDBGFSTACKFRAME *ppFirstFrame)
1313{
1314 return dbgfR3StackWalkBeginCommon(pUVM, idCpu, enmCodeType, NULL, NULL, NULL, RTDBGRETURNTYPE_INVALID, ppFirstFrame);
1315}
1316
1317/**
1318 * Gets the next stack frame.
1319 *
1320 * @returns Pointer to the info for the next stack frame.
1321 * NULL if no more frames.
1322 *
1323 * @param pCurrent Pointer to the current stack frame.
1324 *
1325 */
1326VMMR3DECL(PCDBGFSTACKFRAME) DBGFR3StackWalkNext(PCDBGFSTACKFRAME pCurrent)
1327{
1328 return pCurrent
1329 ? pCurrent->pNextInternal
1330 : NULL;
1331}
1332
1333
/**
 * Ends a stack walk process.
 *
 * This *must* be called after a successful first call to any of the stack
 * walker functions. If not called we will leak memory or other resources.
 *
 * Frees every frame in the chain together with its symbol, line and sure
 * register data.
 *
 * @param pFirstFrame The frame returned by one of the begin functions.
 */
VMMR3DECL(void) DBGFR3StackWalkEnd(PCDBGFSTACKFRAME pFirstFrame)
{
    if (   !pFirstFrame
        || !pFirstFrame->pFirstInternal)
        return;

    /* Always start from the head of the chain, whichever frame we were given. */
    PDBGFSTACKFRAME pFrame = (PDBGFSTACKFRAME)pFirstFrame->pFirstInternal;
    while (pFrame)
    {
        PDBGFSTACKFRAME pCur = pFrame;
        pFrame = (PDBGFSTACKFRAME)pCur->pNextInternal;
        if (pFrame)
        {
            /* Neighbouring frames may reference the same symbol/line
               structures; NULL out any pointer in the next frame that aliases
               one we are about to free below so it isn't freed twice. */
            if (pCur->pSymReturnPC == pFrame->pSymPC)
                pFrame->pSymPC = NULL;
            if (pCur->pSymReturnPC == pFrame->pSymReturnPC)
                pFrame->pSymReturnPC = NULL;

            if (pCur->pSymPC == pFrame->pSymPC)
                pFrame->pSymPC = NULL;
            if (pCur->pSymPC == pFrame->pSymReturnPC)
                pFrame->pSymReturnPC = NULL;

            if (pCur->pLineReturnPC == pFrame->pLinePC)
                pFrame->pLinePC = NULL;
            if (pCur->pLineReturnPC == pFrame->pLineReturnPC)
                pFrame->pLineReturnPC = NULL;

            if (pCur->pLinePC == pFrame->pLinePC)
                pFrame->pLinePC = NULL;
            if (pCur->pLinePC == pFrame->pLineReturnPC)
                pFrame->pLineReturnPC = NULL;
        }

        RTDbgSymbolFree(pCur->pSymPC);
        RTDbgSymbolFree(pCur->pSymReturnPC);
        RTDbgLineFree(pCur->pLinePC);
        RTDbgLineFree(pCur->pLineReturnPC);

        if (pCur->paSureRegs)
        {
            MMR3HeapFree(pCur->paSureRegs);
            pCur->paSureRegs = NULL;
            pCur->cSureRegs  = 0;
        }

        /* Poison the links/flags before freeing in case of stale references. */
        pCur->pNextInternal  = NULL;
        pCur->pFirstInternal = NULL;
        pCur->fFlags         = 0;
        MMR3HeapFree(pCur);
    }
}
1394
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette