VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFMem.cpp@ 95843

Last change on this file since 95843 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.1 KB
Line 
1/* $Id: DBGFMem.cpp 93115 2022-01-01 11:31:46Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Memory Methods.
4 */
5
6/*
7 * Copyright (C) 2007-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DBGF
23#include <VBox/vmm/dbgf.h>
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/hm.h>
27#include "DBGFInternal.h"
28#include <VBox/vmm/vm.h>
29#include <VBox/vmm/uvm.h>
30#include <VBox/err.h>
31#include <VBox/log.h>
32#include <VBox/vmm/mm.h>
33
34
35
/**
 * EMT worker that scans guest memory for an exact byte string.
 *
 * Runs on the EMT of @a idCpu (see DBGFR3MemScan).  The range and alignment
 * are passed by pointer because they may be 64-bit and the request packing
 * only handles register-sized arguments.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the CPU context to search in.
 * @param   pAddress    Where to start searching (mixed address).
 * @param   pcbRange    The number of bytes to scan.  Passed as a pointer because
 *                      it may be 64-bit.
 * @param   puAlign     The alignment restriction imposed on the search result.
 *                      Passed as a pointer for the same reason as @a pcbRange.
 * @param   pabNeedle   What to search for - exact search.
 * @param   cbNeedle    Size of the search byte string.
 * @param   pHitAddress Where to put the address of the first hit.
 */
static DECLCALLBACK(int) dbgfR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, PCRTGCUINTPTR pcbRange,
                                       RTGCUINTPTR *puAlign, const uint8_t *pabNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    Assert(idCpu == VMMGetCpuId(pVM));

    /*
     * Validate the input we use, PGM does the rest.
     */
    RTGCUINTPTR cbRange = *pcbRange;
    if (!DBGFR3AddrIsValid(pUVM, pAddress))
        return VERR_INVALID_POINTER;
    if (!RT_VALID_PTR(pHitAddress))
        return VERR_INVALID_POINTER;

    /*
     * Select DBGF worker by addressing mode.
     */
    int     rc;
    PVMCPU  pVCpu   = VMMGetCpuById(pVM, idCpu);
    PGMMODE enmMode = PGMGetGuestMode(pVCpu);
    if (   enmMode == PGMMODE_REAL
        || enmMode == PGMMODE_PROTECTED
        || DBGFADDRESS_IS_PHYS(pAddress)
        )
    {
        /* Physical scan: make sure the alignment survives the conversion to
           RTGCPHYS without truncation. */
        RTGCPHYS GCPhysAlign = *puAlign;
        if (GCPhysAlign != *puAlign)
            return VERR_OUT_OF_RANGE;
        RTGCPHYS PhysHit;
        rc = PGMR3DbgScanPhysical(pVM, pAddress->FlatPtr, cbRange, GCPhysAlign, pabNeedle, cbNeedle, &PhysHit);
        if (RT_SUCCESS(rc))
            DBGFR3AddrFromPhys(pUVM, pHitAddress, PhysHit);
    }
    else
    {
#if GC_ARCH_BITS > 32
        /* Ranges reaching above 4GB are only meaningful in long mode. */
        if (   (   pAddress->FlatPtr >= _4G
                || pAddress->FlatPtr + cbRange > _4G)
            && enmMode != PGMMODE_AMD64
            && enmMode != PGMMODE_AMD64_NX)
            return VERR_DBGF_MEM_NOT_FOUND;
#endif
        RTGCUINTPTR GCPtrHit;
        rc = PGMR3DbgScanVirtual(pVM, pVCpu, pAddress->FlatPtr, cbRange, *puAlign, pabNeedle, cbNeedle, &GCPtrHit);
        if (RT_SUCCESS(rc))
            DBGFR3AddrFromFlat(pUVM, pHitAddress, GCPtrHit);
    }

    return rc;
}
102
103
/**
 * Scan guest memory for an exact byte string.
 *
 * The scan itself is executed by dbgfR3MemScan on the EMT of @a idCpu via a
 * priority request, so this is safe to call from any thread.
 *
 * @returns VBox status codes:
 * @retval  VINF_SUCCESS and *pHitAddress on success.
 * @retval  VERR_DBGF_MEM_NOT_FOUND if not found.
 * @retval  VERR_INVALID_POINTER if any of the pointer arguments are invalid.
 * @retval  VERR_INVALID_PARAMETER if any other arguments are invalid.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the CPU context to search in.
 * @param   pAddress    Where to start searching (mixed address).
 * @param   cbRange     The number of bytes to scan.
 * @param   uAlign      The alignment restriction imposed on the result.
 *                      Usually set to 1.
 * @param   pvNeedle    What to search for - exact search.
 * @param   cbNeedle    Size of the search byte string.
 * @param   pHitAddress Where to put the address of the first hit.
 *
 * @thread  Any thread.
 */
VMMR3DECL(int) DBGFR3MemScan(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, RTGCUINTPTR cbRange, RTGCUINTPTR uAlign,
                             const void *pvNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    /* cbRange and uAlign may be 64-bit, so they are passed by pointer since
       request arguments are register sized. */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemScan, 8,
                                    pUVM, idCpu, pAddress, &cbRange, &uAlign, pvNeedle, cbNeedle, pHitAddress);

}
134
135
136/**
137 * Read guest memory.
138 *
139 * @returns VBox status code.
140 * @param pUVM The user mode VM handle.
141 * @param idCpu The ID of the CPU context to read memory from.
142 * @param pAddress Where to start reading.
143 * @param pvBuf Where to store the data we've read.
144 * @param cbRead The number of bytes to read.
145 */
146static DECLCALLBACK(int) dbgfR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
147{
148 PVM pVM = pUVM->pVM;
149 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
150 Assert(idCpu == VMMGetCpuId(pVM));
151
152 /*
153 * Validate the input we use, PGM does the rest.
154 */
155 if (!DBGFR3AddrIsValid(pUVM, pAddress))
156 return VERR_INVALID_POINTER;
157 if (!RT_VALID_PTR(pvBuf))
158 return VERR_INVALID_POINTER;
159
160 /*
161 * Select PGM worker by addressing mode.
162 */
163 int rc;
164 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
165 PGMMODE enmMode = PGMGetGuestMode(pVCpu);
166 if ( enmMode == PGMMODE_REAL
167 || enmMode == PGMMODE_PROTECTED
168 || DBGFADDRESS_IS_PHYS(pAddress) )
169 rc = PGMPhysSimpleReadGCPhys(pVM, pvBuf, pAddress->FlatPtr, cbRead);
170 else
171 {
172#if GC_ARCH_BITS > 32
173 if ( ( pAddress->FlatPtr >= _4G
174 || pAddress->FlatPtr + cbRead > _4G)
175 && enmMode != PGMMODE_AMD64
176 && enmMode != PGMMODE_AMD64_NX)
177 return VERR_PAGE_TABLE_NOT_PRESENT;
178#endif
179 rc = PGMPhysSimpleReadGCPtr(pVCpu, pvBuf, pAddress->FlatPtr, cbRead);
180 }
181 return rc;
182}
183
184
/**
 * Read guest memory.
 *
 * @returns VBox status code.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the source CPU context (for the address).
 * @param   pAddress    Where to start reading.
 * @param   pvBuf       Where to store the data we've read.
 * @param   cbRead      The number of bytes to read.
 *
 * @thread  Any thread.
 */
VMMR3DECL(int) DBGFR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);

    /* Ring-0 (host context) addresses are serviced directly via the R0 stack
       reader, no EMT round-trip or guest paging involved. */
    if ((pAddress->fFlags & DBGFADDRESS_FLAGS_TYPE_MASK) == DBGFADDRESS_FLAGS_RING0)
    {
        AssertCompile(sizeof(RTHCUINTPTR) <= sizeof(pAddress->FlatPtr));
        VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
        return VMMR3ReadR0Stack(pUVM->pVM, idCpu, (RTHCUINTPTR)pAddress->FlatPtr, pvBuf, cbRead);
    }
    /* Everything else must be read on the EMT of the given CPU. */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemRead, 5, pUVM, idCpu, pAddress, pvBuf, cbRead);
}
209
210
211/**
212 * Read a zero terminated string from guest memory.
213 *
214 * @returns VBox status code.
215 *
216 * @param pUVM The user mode VM handle.
217 * @param idCpu The ID of the source CPU context (for the address).
218 * @param pAddress Where to start reading.
219 * @param pszBuf Where to store the string.
220 * @param cchBuf The size of the buffer.
221 */
222static DECLCALLBACK(int) dbgfR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
223{
224 /*
225 * Validate the input we use, PGM does the rest.
226 */
227 if (!DBGFR3AddrIsValid(pUVM, pAddress))
228 return VERR_INVALID_POINTER;
229 if (!RT_VALID_PTR(pszBuf))
230 return VERR_INVALID_POINTER;
231
232 /*
233 * Let dbgfR3MemRead do the job.
234 */
235 int rc = dbgfR3MemRead(pUVM, idCpu, pAddress, pszBuf, cchBuf);
236
237 /*
238 * Make sure the result is terminated and that overflow is signaled.
239 * This may look a bit reckless with the rc but, it should be fine.
240 */
241 if (!RTStrEnd(pszBuf, cchBuf))
242 {
243 pszBuf[cchBuf - 1] = '\0';
244 rc = VINF_BUFFER_OVERFLOW;
245 }
246 /*
247 * Handle partial reads (not perfect).
248 */
249 else if (RT_FAILURE(rc))
250 {
251 if (pszBuf[0])
252 rc = VINF_SUCCESS;
253 }
254
255 return rc;
256}
257
258
259/**
260 * Read a zero terminated string from guest memory.
261 *
262 * @returns VBox status code.
263 *
264 * @param pUVM The user mode VM handle.
265 * @param idCpu The ID of the source CPU context (for the address).
266 * @param pAddress Where to start reading.
267 * @param pszBuf Where to store the string.
268 * @param cchBuf The size of the buffer.
269 */
270VMMR3DECL(int) DBGFR3MemReadString(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, char *pszBuf, size_t cchBuf)
271{
272 /*
273 * Validate and zero output.
274 */
275 if (!RT_VALID_PTR(pszBuf))
276 return VERR_INVALID_POINTER;
277 if (cchBuf <= 0)
278 return VERR_INVALID_PARAMETER;
279 memset(pszBuf, 0, cchBuf);
280 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
281 AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
282
283 /*
284 * Pass it on to the EMT.
285 */
286 return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemReadString, 5, pUVM, idCpu, pAddress, pszBuf, cchBuf);
287}
288
289
290/**
291 * Writes guest memory.
292 *
293 * @returns VBox status code.
294 *
295 * @param pUVM The user mode VM handle.
296 * @param idCpu The ID of the target CPU context (for the address).
297 * @param pAddress Where to start writing.
298 * @param pvBuf The data to write.
299 * @param cbWrite The number of bytes to write.
300 */
301static DECLCALLBACK(int) dbgfR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
302{
303 /*
304 * Validate the input we use, PGM does the rest.
305 */
306 if (!DBGFR3AddrIsValid(pUVM, pAddress))
307 return VERR_INVALID_POINTER;
308 if (!RT_VALID_PTR(pvBuf))
309 return VERR_INVALID_POINTER;
310 PVM pVM = pUVM->pVM;
311 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
312
313 /*
314 * Select PGM function by addressing mode.
315 */
316 int rc;
317 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
318 PGMMODE enmMode = PGMGetGuestMode(pVCpu);
319 if ( enmMode == PGMMODE_REAL
320 || enmMode == PGMMODE_PROTECTED
321 || DBGFADDRESS_IS_PHYS(pAddress) )
322 rc = PGMPhysSimpleWriteGCPhys(pVM, pAddress->FlatPtr, pvBuf, cbWrite);
323 else
324 {
325#if GC_ARCH_BITS > 32
326 if ( ( pAddress->FlatPtr >= _4G
327 || pAddress->FlatPtr + cbWrite > _4G)
328 && enmMode != PGMMODE_AMD64
329 && enmMode != PGMMODE_AMD64_NX)
330 return VERR_PAGE_TABLE_NOT_PRESENT;
331#endif
332 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pAddress->FlatPtr, pvBuf, cbWrite);
333 }
334 return rc;
335}
336
337
/**
 * Write guest memory.  (The old header said "Read" - copy/paste slip.)
 *
 * @returns VBox status code.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the target CPU context (for the address).
 * @param   pAddress    Where to start writing.
 * @param   pvBuf       The data to write.
 * @param   cbWrite     The number of bytes to write.
 *
 * @thread  Any thread.
 */
VMMR3DECL(int) DBGFR3MemWrite(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    /* The write must be performed on the EMT of the given CPU. */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3MemWrite, 5, pUVM, idCpu, pAddress, pvBuf, cbWrite);
}
355
356
/**
 * Worker for DBGFR3SelQueryInfo that calls into SELM.
 *
 * Runs on the EMT of @a idCpu and optionally applies 64-bit mode adjustments
 * to the returned selector info (DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE).
 *
 * @returns VBox status code (see SELMR3GetSelectorInfo).
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the virtual CPU context.
 * @param   Sel         The selector to get info about.
 * @param   fFlags      DBGFSELQI_FLAGS_XXX, validated by the caller.
 * @param   pSelInfo    Where to store the information (zeroed by the caller).
 */
static DECLCALLBACK(int) dbgfR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * Make the query.
     */
    PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
    VMCPU_ASSERT_EMT(pVCpu);
    int rc = SELMR3GetSelectorInfo(pVCpu, Sel, pSelInfo);

    /*
     * 64-bit mode HACKS for making data and stack selectors wide open when
     * queried. This is voodoo magic.
     */
    if (fFlags & DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)
    {
        /* Expand 64-bit data and stack selectors. The check is a bit bogus... */
        if (   RT_SUCCESS(rc)
            && (pSelInfo->fFlags & (  DBGFSELINFO_FLAGS_LONG_MODE | DBGFSELINFO_FLAGS_REAL_MODE | DBGFSELINFO_FLAGS_PROT_MODE
                                    | DBGFSELINFO_FLAGS_GATE | DBGFSELINFO_FLAGS_HYPER
                                    | DBGFSELINFO_FLAGS_INVALID | DBGFSELINFO_FLAGS_NOT_PRESENT))
               == DBGFSELINFO_FLAGS_LONG_MODE
            && pSelInfo->cbLimit != ~(RTGCPTR)0
            && CPUMIsGuestIn64BitCode(pVCpu) )
        {
            /* Force flat addressing: base 0, wide-open limit. */
            pSelInfo->GCPtrBase = 0;
            pSelInfo->cbLimit   = ~(RTGCPTR)0;
        }
        else if (   Sel == 0
                 && CPUMIsGuestIn64BitCode(pVCpu))
        {
            /* Null selector in 64-bit mode: synthesize a flat, present,
               long-mode descriptor for it. */
            pSelInfo->GCPtrBase = 0;
            pSelInfo->cbLimit   = ~(RTGCPTR)0;
            pSelInfo->Sel       = 0;
            pSelInfo->SelGate   = 0;
            pSelInfo->fFlags    = DBGFSELINFO_FLAGS_LONG_MODE;
            pSelInfo->u.Raw64.Gen.u1Present  = 1;
            pSelInfo->u.Raw64.Gen.u1Long     = 1;
            pSelInfo->u.Raw64.Gen.u1DescType = 1;
            rc = VINF_SUCCESS;
        }
    }
    return rc;
}
406
407
/**
 * Gets information about a selector.
 *
 * Intended for the debugger mostly and will prefer the guest
 * descriptor tables over the shadow ones.
 *
 * @returns VBox status code, the following are the common ones.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_INVALID_SELECTOR if the selector isn't fully inside the
 *          descriptor table.
 * @retval  VERR_SELECTOR_NOT_PRESENT if the LDT is invalid or not present. This
 *          is not returned if the selector itself isn't present, you have to
 *          check that for yourself (see DBGFSELINFO::fFlags).
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the
 *          pagetable or page backing the selector table wasn't present.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the virtual CPU context.
 * @param   Sel         The selector to get info about.
 * @param   fFlags      Flags, see DBGFSELQI_FLAGS_XXX (the old reference to
 *                      DBGFQSEL_FLAGS_* was a stale name).
 * @param   pSelInfo    Where to store the information. This will always be
 *                      updated.
 *
 * @remarks This is a wrapper around SELMR3GetSelectorInfo executed on the
 *          target CPU's EMT.
 */
VMMR3DECL(int) DBGFR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    AssertReturn(!(fFlags & ~(DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)), VERR_INVALID_PARAMETER);

    /* Clear the return data here on this thread. */
    memset(pSelInfo, 0, sizeof(*pSelInfo));

    /*
     * Dispatch the request to a worker running on the target CPU.
     */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3SelQueryInfo, 5, pUVM, idCpu, Sel, fFlags, pSelInfo);
}
448
449
450/**
451 * Validates a CS selector.
452 *
453 * @returns VBox status code.
454 * @param pSelInfo Pointer to the selector information for the CS selector.
455 * @param SelCPL The selector defining the CPL (SS).
456 */
457VMMDECL(int) DBGFR3SelInfoValidateCS(PCDBGFSELINFO pSelInfo, RTSEL SelCPL)
458{
459 /*
460 * Check if present.
461 */
462 if (pSelInfo->u.Raw.Gen.u1Present)
463 {
464 /*
465 * Type check.
466 */
467 if ( pSelInfo->u.Raw.Gen.u1DescType == 1
468 && (pSelInfo->u.Raw.Gen.u4Type & X86_SEL_TYPE_CODE))
469 {
470 /*
471 * Check level.
472 */
473 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, pSelInfo->Sel & X86_SEL_RPL);
474 if ( !(pSelInfo->u.Raw.Gen.u4Type & X86_SEL_TYPE_CONF)
475 ? uLevel <= pSelInfo->u.Raw.Gen.u2Dpl
476 : uLevel >= pSelInfo->u.Raw.Gen.u2Dpl /* hope I got this right now... */
477 )
478 return VINF_SUCCESS;
479 return VERR_INVALID_RPL;
480 }
481 return VERR_NOT_CODE_SELECTOR;
482 }
483 return VERR_SELECTOR_NOT_PRESENT;
484}
485
486
487/**
488 * Converts a PGM paging mode to a set of DBGFPGDMP_XXX flags.
489 *
490 * @returns Flags. UINT32_MAX if the mode is invalid (asserted).
491 * @param enmMode The mode.
492 */
493static uint32_t dbgfR3PagingDumpModeToFlags(PGMMODE enmMode)
494{
495 switch (enmMode)
496 {
497 case PGMMODE_32_BIT:
498 return DBGFPGDMP_FLAGS_PSE;
499 case PGMMODE_PAE:
500 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE;
501 case PGMMODE_PAE_NX:
502 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_NXE;
503 case PGMMODE_AMD64:
504 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME;
505 case PGMMODE_AMD64_NX:
506 return DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_NXE;
507 case PGMMODE_NESTED_32BIT:
508 return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE;
509 case PGMMODE_NESTED_PAE:
510 return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_NXE;
511 case PGMMODE_NESTED_AMD64:
512 return DBGFPGDMP_FLAGS_NP | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_NXE;
513 case PGMMODE_EPT:
514 return DBGFPGDMP_FLAGS_EPT;
515 case PGMMODE_NONE:
516 return 0;
517 default:
518 AssertFailedReturn(UINT32_MAX);
519 }
520}
521
522
/**
 * EMT worker for DBGFR3PagingDumpEx.
 *
 * Dumps guest and/or shadow paging structures; when both are requested the
 * function recurses once for each context.
 *
 * @returns VBox status code.
 * @param   pUVM            The shared VM handle.
 * @param   idCpu           The current CPU ID.
 * @param   fFlags          The flags, DBGFPGDMP_FLAGS_XXX.  Valid.
 * @param   pcr3            The CR3 to use (unless we're getting the current
 *                          state, see @a fFlags).  Passed by pointer because
 *                          it may be 64-bit.
 * @param   pu64FirstAddr   The first address.
 * @param   pu64LastAddr    The last address.
 * @param   cMaxDepth       The depth.
 * @param   pHlp            The output callbacks.
 */
static DECLCALLBACK(int) dbgfR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t *pcr3,
                                            uint64_t *pu64FirstAddr, uint64_t *pu64LastAddr,
                                            uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /*
     * Implement dumping both context by means of recursion.
     */
    if ((fFlags & (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW)) == (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW))
    {
        int rc1 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_GUEST,
                                     pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
        int rc2 = dbgfR3PagingDumpEx(pUVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_SHADOW,
                                     pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
        /* The shadow dump's failure takes precedence. */
        return RT_FAILURE(rc1) ? rc1 : rc2;
    }

    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * Get the current CR3/mode if required.
     */
    uint64_t cr3 = *pcr3;
    if (fFlags & (DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE))
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        {
            /* No shadow tables to dump when running on NEM. */
            if (PGMGetShadowMode(pVCpu) == PGMMODE_NONE)
            {
                pHlp->pfnPrintf(pHlp, "Shadow paging mode is 'none' (NEM)\n");
                return VINF_SUCCESS;
            }

            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                cr3 = PGMGetHyperCR3(pVCpu);
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
                fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetShadowMode(pVCpu));
        }
        else
        {
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                cr3 = CPUMGetGuestCR3(pVCpu);
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
            {
                /* The mode flags share bit positions with the corresponding
                   CR4/EFER bits, so the masked values can be OR'ed in directly. */
                AssertCompile(DBGFPGDMP_FLAGS_PSE == X86_CR4_PSE); AssertCompile(DBGFPGDMP_FLAGS_PAE == X86_CR4_PAE);
                fFlags |= CPUMGetGuestCR4(pVCpu) & (X86_CR4_PSE | X86_CR4_PAE);
                AssertCompile(DBGFPGDMP_FLAGS_LME == MSR_K6_EFER_LME); AssertCompile(DBGFPGDMP_FLAGS_NXE == MSR_K6_EFER_NXE);
                fFlags |= CPUMGetGuestEFER(pVCpu) & (MSR_K6_EFER_LME | MSR_K6_EFER_NXE);
            }
        }
    }
    /* The "current" requests are resolved now; don't pass them on to PGM. */
    fFlags &= ~(DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_CURRENT_CR3);

    /*
     * Call PGM to do the real work.
     */
    int rc;
    if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        rc = PGMR3DumpHierarchyShw(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
    else
        rc = PGMR3DumpHierarchyGst(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
    return rc;
}
601
602
/**
 * Dump paging structures.
 *
 * This API can be used to dump both guest and shadow structures.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The current CPU ID.
 * @param   fFlags          The flags, DBGFPGDMP_FLAGS_XXX.  Must include at
 *                          least one of GUEST/SHADOW; mode flags require
 *                          CURRENT_MODE; EPT excludes the x86 mode flags.
 * @param   cr3             The CR3 to use (unless we're getting the current
 *                          state, see @a fFlags).
 * @param   u64FirstAddr    The address to start dumping at.
 * @param   u64LastAddr     The address to end dumping after.
 * @param   cMaxDepth       The depth.  Must be non-zero.
 * @param   pHlp            The output callbacks.  Defaults to the debug log if
 *                          NULL.
 */
VMMDECL(int) DBGFR3PagingDumpEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, uint64_t cr3, uint64_t u64FirstAddr,
                                uint64_t u64LastAddr, uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /*
     * Input validation.
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pUVM->cCpus, VERR_INVALID_CPU_ID);
    AssertReturn(!(fFlags & ~DBGFPGDMP_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(fFlags & (DBGFPGDMP_FLAGS_SHADOW | DBGFPGDMP_FLAGS_GUEST), VERR_INVALID_PARAMETER);
    AssertReturn((fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE) || !(fFlags & DBGFPGDMP_FLAGS_MODE_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(   !(fFlags & DBGFPGDMP_FLAGS_EPT)
                 || !(fFlags & (DBGFPGDMP_FLAGS_LME | DBGFPGDMP_FLAGS_PAE | DBGFPGDMP_FLAGS_PSE | DBGFPGDMP_FLAGS_NXE))
                 , VERR_INVALID_PARAMETER);
    AssertReturn(cMaxDepth, VERR_INVALID_PARAMETER);

    /*
     * Forward the request to the target CPU.  The 64-bit arguments are passed
     * by pointer since request arguments are register sized.
     */
    return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3PagingDumpEx, 8,
                                    pUVM, idCpu, fFlags, &cr3, &u64FirstAddr, &u64LastAddr, cMaxDepth, pHlp ? pHlp : DBGFR3InfoLogHlp());
}
642
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette