VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp@ 86666

Last change on this file since 86666 was 86666, checked in by vboxsync, 4 years ago

include/VBox,VMM,DBGF: Some boilerplate for the new breakpoint manager which is disabled by default (can be built with VBOX_WITH_LOTS_OF_DBGF_BPS), bugref:9837

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 76.3 KB
Line 
1/* $Id: DBGF.cpp 86666 2020-10-21 15:01:32Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_dbgf DBGF - The Debugger Facility
20 *
21 * The purpose of the DBGF is to provide an interface for debuggers to
22 * manipulate the VMM without having to mess up the source code for each of
23 * them. The DBGF is always built in and will always work when a debugger
24 * attaches to the VM. The DBGF provides the basic debugger features, such as
25 * halting execution, handling breakpoints, single step execution, instruction
26 * disassembly, info querying, OS specific diggers, symbol and module
27 * management.
28 *
29 * The interface is working in a manner similar to the win32, linux and os2
30 * debugger interfaces. The interface has an asynchronous nature. This comes
31 * from the fact that the VMM and the Debugger are running in different threads.
32 * They are referred to as the "emulation thread" and the "debugger thread", or
 * as the "ping thread" and the "pong thread", respectively. (The last set of
34 * names comes from the use of the Ping-Pong synchronization construct from the
35 * RTSem API.)
36 *
37 * @see grp_dbgf
38 *
39 *
40 * @section sec_dbgf_scenario Usage Scenario
41 *
42 * The debugger starts by attaching to the VM. For practical reasons we limit the
43 * number of concurrently attached debuggers to 1 per VM. The action of
44 * attaching to the VM causes the VM to check and generate debug events.
45 *
46 * The debugger then will wait/poll for debug events and issue commands.
47 *
48 * The waiting and polling is done by the DBGFEventWait() function. It will wait
49 * for the emulation thread to send a ping, thus indicating that there is an
50 * event waiting to be processed.
51 *
52 * An event can be a response to a command issued previously, the hitting of a
53 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
54 * the ping and must respond to the event at hand - the VMM is waiting. This
55 * usually means that the user of the debugger must do something, but it doesn't
56 * have to. The debugger is free to call any DBGF function (nearly at least)
57 * while processing the event.
58 *
59 * Typically the user will issue a request for the execution to be resumed, so
60 * the debugger calls DBGFResume() and goes back to waiting/polling for events.
61 *
62 * When the user eventually terminates the debugging session or selects another
63 * VM, the debugger detaches from the VM. This means that breakpoints are
64 * disabled and that the emulation thread no longer polls for debugger commands.
65 *
66 */
67
68
69/*********************************************************************************************************************************
70* Header Files *
71*********************************************************************************************************************************/
72#define LOG_GROUP LOG_GROUP_DBGF
73#include <VBox/vmm/dbgf.h>
74#include <VBox/vmm/selm.h>
75#include <VBox/vmm/em.h>
76#include <VBox/vmm/hm.h>
77#include <VBox/vmm/mm.h>
78#include "DBGFInternal.h"
79#include <VBox/vmm/vm.h>
80#include <VBox/vmm/uvm.h>
81#include <VBox/err.h>
82
83#include <VBox/log.h>
84#include <iprt/semaphore.h>
85#include <iprt/thread.h>
86#include <iprt/asm.h>
87#include <iprt/time.h>
88#include <iprt/assert.h>
89#include <iprt/stream.h>
90#include <iprt/env.h>
91
92
93/*********************************************************************************************************************************
94* Structures and Typedefs *
95*********************************************************************************************************************************/
/**
 * Instruction type returned by dbgfStepGetCurInstrType.
 *
 * Used by the stepping filter (dbgfStepAreWeThereYet) to classify the
 * instruction at the current PC.
 */
typedef enum DBGFSTEPINSTRTYPE
{
    /** Customary invalid zero value. */
    DBGFSTEPINSTRTYPE_INVALID = 0,
    /** Any instruction not classified below. */
    DBGFSTEPINSTRTYPE_OTHER,
    /** A return instruction. */
    DBGFSTEPINSTRTYPE_RET,
    /** A call instruction. */
    DBGFSTEPINSTRTYPE_CALL,
    /** End of valid instruction types. */
    DBGFSTEPINSTRTYPE_END,
    /** Blow the type up to 32 bits. */
    DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
} DBGFSTEPINSTRTYPE;
108
109
110/*********************************************************************************************************************************
111* Internal Functions *
112*********************************************************************************************************************************/
113DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
114DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
115static int dbgfR3CpuWait(PVMCPU pVCpu);
116static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
117static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
118static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
119
120
121
/**
 * Initializes the DBGF.
 *
 * Each sub-component is initialized in sequence; when a step fails, the
 * components initialized so far are torn down again in reverse order before
 * the status code is returned.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;
    /* The shadow structures must fit inside the padding reserved for them. */
    AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
    AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));

    /* No stepping filter is active initially. */
    pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;

    /*
     * The usual sideways mountain climbing style of init:
     */
    int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
    if (RT_SUCCESS(rc))
    {
        rc = dbgfR3TraceInit(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = dbgfR3RegInit(pUVM);
            if (RT_SUCCESS(rc))
            {
                rc = dbgfR3AsInit(pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = dbgfR3BpInit(pVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = dbgfR3OSInit(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = dbgfR3PlugInInit(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                rc = dbgfR3BugCheckInit(pVM);
                                if (RT_SUCCESS(rc))
                                {
#ifdef VBOX_WITH_DBGF_TRACING
                                    rc = dbgfR3TracerInit(pVM);
#endif
                                    if (RT_SUCCESS(rc))
                                    {
                                        return VINF_SUCCESS;
                                    }
                                    /* NOTE(review): no bug-check teardown counterpart on this
                                       failure path - verify dbgfR3BugCheckInit needs no undo. */
                                }
                                dbgfR3PlugInTerm(pUVM);
                            }
                            dbgfR3OSTermPart1(pUVM);
                            dbgfR3OSTermPart2(pUVM);
                        }
#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
                        /* NOTE(review): dbgfR3BpInit above runs unconditionally, but this
                           teardown is only compiled with VBOX_WITH_LOTS_OF_DBGF_BPS - confirm
                           the old breakpoint manager needs no termination call here. */
                        dbgfR3BpTerm(pVM);
#endif
                    }
                    dbgfR3AsTerm(pUVM);
                }
                dbgfR3RegTerm(pUVM);
            }
            dbgfR3TraceTerm(pVM);
        }
        dbgfR3InfoTerm(pUVM);
    }
    return rc;
}
190
191
/**
 * Terminates and cleans up resources allocated by the DBGF.
 *
 * The sub-components are torn down in the reverse order of DBGFR3Init.
 * Note that the OS digger teardown is split in two parts, with the plug-in
 * teardown happening in between.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;

#ifdef VBOX_WITH_DBGF_TRACING
    dbgfR3TracerTerm(pVM);
#endif
    dbgfR3OSTermPart1(pUVM);
    dbgfR3PlugInTerm(pUVM);
    dbgfR3OSTermPart2(pUVM);
#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
    /* Only the new (VBOX_WITH_LOTS_OF_DBGF_BPS) breakpoint manager has a
       termination routine. */
    dbgfR3BpTerm(pVM);
#endif
    dbgfR3AsTerm(pUVM);
    dbgfR3RegTerm(pUVM);
    dbgfR3TraceTerm(pVM);
    dbgfR3InfoTerm(pUVM);

    return VINF_SUCCESS;
}
218
219
/**
 * This is for tstCFGM and others to avoid triggering leak detection.
 *
 * Only the info handler part of DBGF is torn down here, for callers that
 * never performed a full DBGFR3Init/DBGFR3Term cycle.
 *
 * @param   pUVM    The user mode VM structure.
 */
VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
{
    dbgfR3InfoTerm(pUVM);
}
230
231
232/**
233 * Called when the VM is powered off to detach debuggers.
234 *
235 * @param pVM The cross context VM structure.
236 */
237VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
238{
239 /*
240 * Send a termination event to any attached debugger.
241 */
242 if (pVM->dbgf.s.fAttached)
243 {
244 PVMCPU pVCpu = VMMGetCpu(pVM);
245 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
246 AssertLogRelRC(rc);
247
248 /*
249 * Clear the FF so we won't get confused later on.
250 */
251 VM_FF_CLEAR(pVM, VM_FF_DBGF);
252 }
253}
254
255
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    /* Only the trace and address-space sub-components hold relocatable data. */
    dbgfR3TraceRelocate(pVM);
    dbgfR3AsRelocate(pVM->pUVM, offDelta);
}
269
270
/**
 * Waits a little while for a debugger to attach.
 *
 * Polls in 100 ms ticks while printing a countdown to stderr, and keeps
 * rendezvous and priority requests serviced so an attaching debugger can
 * make progress.
 *
 * @returns true if a debugger has attached, false if we gave up waiting.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context per CPU structure.
 * @param   enmEvent    Event.
 *
 * @thread EMT(pVCpu)
 */
bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
{
    /*
     * First a message.
     */
#if !defined(DEBUG)
    int cWait = 10;     /* Release builds: 10 ticks = 1 second. */
#else
    /* Debug builds: 15 seconds, except only 1 second for hyper assertions and
       fatal errors in non-raw-mode when VBOX_DBGF_WAIT_FOR_ATTACH isn't set. */
    int cWait = !VM_IS_RAW_MODE_ENABLED(pVM)
             && (   enmEvent == DBGFEVENT_ASSERTION_HYPER
                 || enmEvent == DBGFEVENT_FATAL_ERROR)
             && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH")
              ? 10
              : 150;
#endif
    RTStrmPrintf(g_pStdErr, "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n",
                 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
    RTStrmFlush(g_pStdErr);
    while (cWait > 0)
    {
        RTThreadSleep(100);
        if (pVM->dbgf.s.fAttached)
        {
            RTStrmPrintf(g_pStdErr, "Attached!\n");
            RTStrmFlush(g_pStdErr);
            return true;
        }

        /* Process rendezvous (debugger attaching involves such). */
        if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
        {
            int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
            if (rc != VINF_SUCCESS)
            {
                /** @todo Ignoring these could be bad. */
                RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
                RTStrmFlush(g_pStdErr);
            }
        }

        /* Process priority stuff. */
        if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
            || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
        {
            int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
            if (rc == VINF_SUCCESS)
                rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
            if (rc != VINF_SUCCESS)
            {
                /** @todo Ignoring these could be bad. */
                RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
                RTStrmFlush(g_pStdErr);
            }
        }

        /* next */
        if (!(cWait % 10))  /* Print the remaining seconds once per second. */
        {
            RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
            RTStrmFlush(g_pStdErr);
        }
        cWait--;
    }

    RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
    RTStrmFlush(g_pStdErr);
    return false;
}
349
350
/**
 * Forced action callback.
 *
 * The VMM will call this from it's main loop when either VM_FF_DBGF or
 * VMCPU_FF_DBGF are set.
 *
 * The function checks for and executes pending commands from the debugger.
 * Then it checks for pending debug events and serves these.
 *
 * @returns VINF_SUCCESS normally.
 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 */
VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    /*
     * Dispatch pending events.
     */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
    {
        /* Only the most recent event entry can be in the CURRENT state. */
        if (   pVCpu->dbgf.s.cEvents > 0
            && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
        {
            rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
            /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
        }

        /*
         * Command pending? Process it.
         */
        PUVMCPU pUVCpu = pVCpu->pUVCpu;
        if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
        {
            bool fResumeExecution;
            DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;   /* copy before the command slot is cleared */
            DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
            VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
            if (!fResumeExecution)
                rcStrict2 = dbgfR3CpuWait(pVCpu);
            /* Merge statuses: a failure or a numerically lower (higher priority)
               strict code replaces the current one. */
            if (   rcStrict2 != VINF_SUCCESS
                && (   rcStrict == VINF_SUCCESS
                    || RT_FAILURE(rcStrict2)
                    || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
                rcStrict = rcStrict2;
        }
    }

    return VBOXSTRICTRC_TODO(rcStrict);
}
403
404
405/**
406 * Try to determine the event context.
407 *
408 * @returns debug event context.
409 * @param pVCpu The cross context vCPU structure.
410 */
411static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
412{
413 switch (EMGetState(pVCpu))
414 {
415 case EMSTATE_HM:
416 case EMSTATE_NEM:
417 case EMSTATE_DEBUG_GUEST_HM:
418 case EMSTATE_DEBUG_GUEST_NEM:
419 return DBGFEVENTCTX_HM;
420
421 case EMSTATE_IEM:
422 case EMSTATE_RAW:
423 case EMSTATE_IEM_THEN_REM:
424 case EMSTATE_DEBUG_GUEST_IEM:
425 case EMSTATE_DEBUG_GUEST_RAW:
426 return DBGFEVENTCTX_RAW;
427
428
429 case EMSTATE_REM:
430 case EMSTATE_DEBUG_GUEST_REM:
431 return DBGFEVENTCTX_REM;
432
433 case EMSTATE_DEBUG_HYPER:
434 case EMSTATE_GURU_MEDITATION:
435 return DBGFEVENTCTX_HYPER;
436
437 default:
438 return DBGFEVENTCTX_OTHER;
439 }
440}
441
442
/**
 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
 *
 * Writers are serialized by hMtxDbgEvtWr; the write index is published
 * atomically after the entry is fully written, and the debugger is then
 * woken via the hEvtWait event semaphore.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The CPU sending the event.
 * @param   enmType     The event type to send.
 * @param   enmCtx      The event context, DBGFEVENTCTX_INVALID will be resolved.
 * @param   pvPayload   Event payload (DBGFEVENT::u data), optional.
 * @param   cbPayload   The size of the event payload, optional.
 */
static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
                                 void const *pvPayload, size_t cbPayload)
{
    PUVM pUVM = pVM->pUVM;
    pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */

    /*
     * Massage the input a little.
     */
    /* Clip over-sized payloads to the event union size instead of overflowing. */
    AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
    if (enmCtx == DBGFEVENTCTX_INVALID)
        enmCtx = dbgfR3FigureEventCtx(pVCpu);

    /*
     * Put the event into the ring buffer.
     */
    RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);

    uint32_t const cDbgEvtMax     = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax); /* guard against division by zero */
    uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
    uint32_t const idxDbgEvtRead  = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
    /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);

    PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];

#ifdef DEBUG
    /* Poison the entry in debug builds so stale data is easy to spot. */
    ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
#endif
    pEvent->enmType   = enmType;
    pEvent->enmCtx    = enmCtx;
    pEvent->idCpu     = pVCpu->idCpu;
    pEvent->uReserved = 0;
    if (cbPayload)
        memcpy(&pEvent->u, pvPayload, cbPayload);

    /* Publish the entry by advancing the write index (atomic, after the data). */
    ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);

    RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);

    /*
     * Signal the debugger.
     */
    return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
}
498
499
500/**
501 * Send event and wait for the debugger to respond.
502 *
503 * @returns Strict VBox status code.
504 * @param pVM The cross context VM structure.
505 * @param pVCpu The CPU sending the event.
506 * @param enmType The event type to send.
507 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
508 */
509DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
510{
511 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
512 if (RT_SUCCESS(rc))
513 rc = dbgfR3CpuWait(pVCpu);
514 return rc;
515}
516
517
518/**
519 * Send event and wait for the debugger to respond, extended version.
520 *
521 * @returns Strict VBox status code.
522 * @param pVM The cross context VM structure.
523 * @param pVCpu The CPU sending the event.
524 * @param enmType The event type to send.
525 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
526 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
527 * @param cbPayload The size of the event payload, optional.
528 */
529DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
530 void const *pvPayload, size_t cbPayload)
531{
532 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
533 if (RT_SUCCESS(rc))
534 rc = dbgfR3CpuWait(pVCpu);
535 return rc;
536}
537
538
539/**
540 * Send event but do NOT wait for the debugger.
541 *
542 * Currently only used by dbgfR3CpuCmd().
543 *
544 * @param pVM The cross context VM structure.
545 * @param pVCpu The CPU sending the event.
546 * @param enmType The event type to send.
547 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
548 */
549DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
550{
551 return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
552}
553
554
555/**
556 * The common event prologue code.
557 *
558 * It will make sure someone is attached, and perhaps process any high priority
559 * pending actions (none yet).
560 *
561 * @returns VBox status code.
562 * @param pVM The cross context VM structure.
563 * @param pVCpu The vCPU cross context structure.
564 * @param enmEvent The event to be sent.
565 */
566static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
567{
568 /*
569 * Check if a debugger is attached.
570 */
571 if ( !pVM->dbgf.s.fAttached
572 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
573 {
574 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
575 return VERR_DBGF_NOT_ATTACHED;
576 }
577
578 /*
579 * Look thru pending commands and finish those which make sense now.
580 */
581 /** @todo Process/purge pending commands. */
582 //int rc = DBGFR3VMMForcedAction(pVM);
583 return VINF_SUCCESS;
584}
585
586
/**
 * Processes a pending event on the current CPU.
 *
 * This is called by EM in response to VINF_EM_DBG_EVENT.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 *
 * @thread EMT(pVCpu)
 */
VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);

    /*
     * Check that we've got an event first.
     */
    /* Only the newest entry on the per-CPU event stack can be CURRENT. */
    AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
    AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
    PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;

    /*
     * Make sure we've got a debugger and is allowed to speak to it.
     */
    int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
    if (RT_FAILURE(rc))
    {
        /** @todo drop them events? */
        return rc; /** @todo this will cause trouble if we're here via an FF! */
    }

    /*
     * Send the event and mark it as ignore.
     * ASSUMES no new events get generate while dbgfR3CpuWait is executing!
     */
    VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
    pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
    return rcStrict;
}
628
629
630/**
631 * Send a generic debugger event which takes no data.
632 *
633 * @returns VBox status code.
634 * @param pVM The cross context VM structure.
635 * @param enmEvent The event to send.
636 * @internal
637 */
638VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
639{
640 PVMCPU pVCpu = VMMGetCpu(pVM);
641 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
642
643 /*
644 * Do stepping filtering.
645 */
646 /** @todo Would be better if we did some of this inside the execution
647 * engines. */
648 if ( enmEvent == DBGFEVENT_STEPPED
649 || enmEvent == DBGFEVENT_STEPPED_HYPER)
650 {
651 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
652 return VINF_EM_DBG_STEP;
653 }
654
655 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
656 if (RT_FAILURE(rc))
657 return rc;
658
659 /*
660 * Send the event and process the reply communication.
661 */
662 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
663}
664
665
666/**
667 * Send a debugger event which takes the full source file location.
668 *
669 * @returns VBox status code.
670 * @param pVM The cross context VM structure.
671 * @param enmEvent The event to send.
672 * @param pszFile Source file.
673 * @param uLine Line number in source file.
674 * @param pszFunction Function name.
675 * @param pszFormat Message which accompanies the event.
676 * @param ... Message arguments.
677 * @internal
678 */
679VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
680{
681 va_list args;
682 va_start(args, pszFormat);
683 int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
684 va_end(args);
685 return rc;
686}
687
688
689/**
690 * Send a debugger event which takes the full source file location.
691 *
692 * @returns VBox status code.
693 * @param pVM The cross context VM structure.
694 * @param enmEvent The event to send.
695 * @param pszFile Source file.
696 * @param uLine Line number in source file.
697 * @param pszFunction Function name.
698 * @param pszFormat Message which accompanies the event.
699 * @param args Message arguments.
700 * @internal
701 */
702VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
703{
704 PVMCPU pVCpu = VMMGetCpu(pVM);
705 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
706
707 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
708 if (RT_FAILURE(rc))
709 return rc;
710
711 /*
712 * Format the message.
713 */
714 char *pszMessage = NULL;
715 char szMessage[8192];
716 if (pszFormat && *pszFormat)
717 {
718 pszMessage = &szMessage[0];
719 RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
720 }
721
722 /*
723 * Send the event and process the reply communication.
724 */
725 DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
726 DbgEvent.u.Src.pszFile = pszFile;
727 DbgEvent.u.Src.uLine = uLine;
728 DbgEvent.u.Src.pszFunction = pszFunction;
729 DbgEvent.u.Src.pszMessage = pszMessage;
730 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
731}
732
733
734/**
735 * Send a debugger event which takes the two assertion messages.
736 *
737 * @returns VBox status code.
738 * @param pVM The cross context VM structure.
739 * @param enmEvent The event to send.
740 * @param pszMsg1 First assertion message.
741 * @param pszMsg2 Second assertion message.
742 */
743VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
744{
745 PVMCPU pVCpu = VMMGetCpu(pVM);
746 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
747
748 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
749 if (RT_FAILURE(rc))
750 return rc;
751
752 /*
753 * Send the event and process the reply communication.
754 */
755 DBGFEVENT DbgEvent;
756 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
757 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
758 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
759}
760
761
/**
 * Breakpoint was hit somewhere.
 * Figure out which breakpoint it is and notify the debugger.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
 */
VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);

    int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Send the event and process the reply communication.
     */
    DBGFEVENT DbgEvent;
#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
    /* Old breakpoint manager: the active breakpoint is identified by index;
       fetch it and clear the per-CPU slot. */
    RTUINT iBp = DbgEvent.u.Bp.iBp = pVCpu->dbgf.s.iActiveBp;
    pVCpu->dbgf.s.iActiveBp = ~0U;
    if (iBp != ~0U)
    {
        DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
        return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
    }
#else
    /* New (VBOX_WITH_LOTS_OF_DBGF_BPS) breakpoint manager: identified by handle. */
    DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
    pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
    if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
    {
        DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
        return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
    }
#endif

#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
    AssertFailed(); /** @todo this should be obsolete now... */

    /* REM breakpoints has be been searched for. */
#if 0 /** @todo get flat PC api! */
    uint32_t eip = CPUMGetGuestEIP(pVM);
#else
    /* Compute the flat PC from RIP and the CS base. */
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    RTGCPTR eip = pCtx->rip + pCtx->cs.u64Base;
#endif
    /* Look the flat PC up among the registered REM breakpoints. */
    for (size_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aBreakpoints); i++)
        if (   pVM->dbgf.s.aBreakpoints[i].enmType == DBGFBPTYPE_REM
            && pVM->dbgf.s.aBreakpoints[i].u.Rem.GCPtr == eip)
        {
            DbgEvent.u.Bp.iBp = pVM->dbgf.s.aBreakpoints[i].iBp;
            break;
        }
    AssertMsg(DbgEvent.u.Bp.iBp != ~0U, ("eip=%08x\n", eip));
    return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_REM, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
#else
    /* With the new manager an unknown active breakpoint is an internal error. */
    return VERR_DBGF_IPE_1;
#endif
}
824
825
826/**
827 * Returns whether the given vCPU is waiting for the debugger.
828 *
829 * @returns Flags whether the vCPU is currently waiting for the debugger.
830 * @param pUVCpu The user mode vCPU structure.
831 */
832DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
833{
834 return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
835}
836
837
838/**
839 * Checks whether the given vCPU is waiting in the debugger.
840 *
841 * @returns Flag whether the indicated vCPU is halted, when VMCPUID_ALL
842 * is given true is returned when at least one vCPU is halted.
843 * @param pUVM The user mode VM structure.
844 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
845 */
846DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
847{
848 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
849
850 /* Check that either the given vCPU or all are actually halted. */
851 if (idCpu != VMCPUID_ALL)
852 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
853
854 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
855 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
856 return true;
857 return false;
858}
859
860
861/**
862 * Gets the pending debug command for this EMT/CPU, replacing it with
863 * DBGFCMD_NO_COMMAND.
864 *
865 * @returns Pending command.
866 * @param pUVCpu The user mode virtual CPU structure.
867 * @thread EMT(pUVCpu)
868 */
869DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
870{
871 DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
872 Log2(("DBGF: Getting command: %d\n", enmCmd));
873 return enmCmd;
874}
875
876
/**
 * Send a debug command to a CPU, making sure to notify it.
 *
 * @returns VBox status code (currently always VINF_SUCCESS).
 * @param   pUVCpu  The user mode virtual CPU structure.
 * @param   enmCmd  The command to submit to the CPU.
 */
DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
{
    Log2(("DBGF: Setting command to %d\n", enmCmd));
    Assert(enmCmd != DBGFCMD_NO_COMMAND);
    /* There is no queueing; a previous command must have been consumed already. */
    AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));

    /* Publish the command before raising the force-action flag. */
    ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
    VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);

    /* Notify the EMT so it picks up the FF. */
    VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
    return VINF_SUCCESS;
}
896
897
/**
 * Waits for the debugger to respond.
 *
 * Blocks this EMT, servicing forced-action flags (rendezvous, requests, VM
 * state checks) while waiting, until a debugger command arrives that tells
 * us to resume execution, or until the VM dies or the debugger detaches.
 *
 * @returns VBox status code, typically a VINF_EM_* code to hand back to EM.
 * @param   pVCpu   The cross context vCPU structure.
 */
static int dbgfR3CpuWait(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    PUVMCPU pUVCpu = pVCpu->pUVCpu;

    LogFlow(("dbgfR3CpuWait:\n"));
    int rcRet = VINF_SUCCESS;

    /* Mark this vCPU as halted in the debugger; cleared again on every exit path. */
    ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);

    /*
     * Waits for the debugger to reply (i.e. issue an command).
     */
    for (;;)
    {
        /*
         * Wait.
         */
        for (;;)
        {
            /*
             * Process forced flags before we go sleep.
             */
            /* NOTE(review): VMCPU_FF_REQUEST in the VM_FF_IS_ANY_SET mask below looks
               like it was meant to be VM_FF_REQUEST - verify against the FF definitions. */
            if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
                || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
            {
                if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
                    break;  /* A debugger command has arrived; process it below. */

                int rc;
                if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
                    rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
                else if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
                         || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
                {
                    LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
                    rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                    if (rc == VINF_SUCCESS)
                        rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
                    LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
                }
                else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
                {
                    /* Translate terminal VM states into EM status codes. */
                    VMSTATE enmState = VMR3GetState(pVM);
                    switch (enmState)
                    {
                        case VMSTATE_FATAL_ERROR:
                        case VMSTATE_FATAL_ERROR_LS:
                        case VMSTATE_GURU_MEDITATION:
                        case VMSTATE_GURU_MEDITATION_LS:
                            rc = VINF_EM_SUSPEND;
                            break;
                        case VMSTATE_DESTROYING:
                            rc = VINF_EM_TERMINATE;
                            break;
                        default:
                            rc = VERR_DBGF_IPE_1;
                            AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
                    }
                }
                else
                    rc = VINF_SUCCESS;
                if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                {
                    switch (rc)
                    {
                        /* These should not surface while stopped in the debugger. */
                        case VINF_EM_DBG_BREAKPOINT:
                        case VINF_EM_DBG_STEPPED:
                        case VINF_EM_DBG_STEP:
                        case VINF_EM_DBG_STOP:
                        case VINF_EM_DBG_EVENT:
                            AssertMsgFailed(("rc=%Rrc\n", rc));
                            break;

                        /* return straight away */
                        case VINF_EM_TERMINATE:
                        case VINF_EM_OFF:
                            LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
                            ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                            return rc;

                        /* remember return code. */
                        default:
                            AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
                            RT_FALL_THRU();
                        case VINF_EM_RESET:
                        case VINF_EM_SUSPEND:
                        case VINF_EM_HALT:
                        case VINF_EM_RESUME:
                        case VINF_EM_RESCHEDULE:
                        case VINF_EM_RESCHEDULE_REM:
                        case VINF_EM_RESCHEDULE_RAW:
                            /* Keep the status with the highest priority (numerically lowest). */
                            if (rc < rcRet || rcRet == VINF_SUCCESS)
                                rcRet = rc;
                            break;
                    }
                }
                else if (RT_FAILURE(rc))
                {
                    LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
                    ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                    return rc;
                }
            }
            else if (pVM->dbgf.s.fAttached)
            {
                /* Nothing pending; block until poked or an FF is raised. */
                int rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
                    ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                    return rc;
                }
            }
            else
            {
                LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
                ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
                return rcRet;
            }
        }

        /*
         * Process the command.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
        bool fResumeExecution;
        DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;   /* copy before the command slot is cleared */
        DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
        int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
        if (fResumeExecution)
        {
            /* Merge the command status into rcRet, preferring failures and
               higher-priority (numerically lower) EM status codes. */
            if (RT_FAILURE(rc))
                rcRet = rc;
            else if (   rc >= VINF_EM_FIRST
                     && rc <= VINF_EM_LAST
                     && (rc < rcRet || rcRet == VINF_SUCCESS))
                rcRet = rc;
            LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
            ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
            return rcRet;
        }
    }
}
1048
1049
1050/**
1051 * Executes command from debugger.
1052 *
1053 * The caller is responsible for waiting or resuming execution based on the
1054 * value returned in the *pfResumeExecution indicator.
1055 *
1056 * @returns VBox status code. (clearify!)
1057 * @param pVCpu The cross context vCPU structure.
1058 * @param enmCmd The command in question.
1059 * @param pCmdData Pointer to the command data.
1060 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1061 */
1062static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1063{
1064 RT_NOREF(pCmdData); /* for later */
1065
1066 /*
1067 * The cases in this switch returns directly if no event to send.
1068 */
1069 DBGFEVENTTYPE enmEvent;
1070 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1071 switch (enmCmd)
1072 {
1073 /*
1074 * Halt is answered by an event say that we've halted.
1075 */
1076 case DBGFCMD_HALT:
1077 {
1078 *pfResumeExecution = false;
1079 enmEvent = DBGFEVENT_HALT_DONE;
1080 break;
1081 }
1082
1083
1084 /*
1085 * Resume is not answered, we just resume execution.
1086 */
1087 case DBGFCMD_GO:
1088 {
1089 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1090 *pfResumeExecution = true;
1091 return VINF_SUCCESS;
1092 }
1093
1094 /** @todo implement (and define) the rest of the commands. */
1095
1096 /*
1097 * Single step, with trace into.
1098 */
1099 case DBGFCMD_SINGLE_STEP:
1100 {
1101 Log2(("Single step\n"));
1102 PVM pVM = pVCpu->CTX_SUFF(pVM);
1103 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1104 {
1105 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1106 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1107 }
1108 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1109 {
1110 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1111 *pfResumeExecution = true;
1112 return VINF_EM_DBG_STEP;
1113 }
1114 /* Stop after zero steps. Nonsense, but whatever. */
1115 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1116 *pfResumeExecution = false;
1117 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1118 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1119 break;
1120 }
1121
1122 /*
1123 * Default is to send an invalid command event.
1124 */
1125 default:
1126 {
1127 *pfResumeExecution = false;
1128 enmEvent = DBGFEVENT_INVALID_COMMAND;
1129 break;
1130 }
1131 }
1132
1133 /*
1134 * Send the pending event.
1135 */
1136 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1137 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1138 AssertRCStmt(rc, *pfResumeExecution = true);
1139 return rc;
1140}
1141
1142
1143/**
1144 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1145 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1146 */
1147static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1148{
1149 PUVM pUVM = pVM->pUVM;
1150 int *prcAttach = (int *)pvUser;
1151 RT_NOREF(pVCpu);
1152
1153 if (pVM->dbgf.s.fAttached)
1154 {
1155 Log(("dbgfR3Attach: Debugger already attached\n"));
1156 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1157 return VINF_SUCCESS;
1158 }
1159
1160 /*
1161 * The per-CPU bits.
1162 */
1163 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1164 {
1165 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1166
1167 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1168 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1169 }
1170
1171 /*
1172 * Init of the VM -> Debugger communication part living in the global VM structure.
1173 */
1174 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of event ring, increased when being full. */
1175 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1176 pUVM->dbgf.s.idxDbgEvtRead = 0;
1177 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1178 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1179 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1180 int rc;
1181 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1182 if (pUVM->dbgf.s.paDbgEvts)
1183 {
1184 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1185 if (RT_SUCCESS(rc))
1186 {
1187 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1188 if (RT_SUCCESS(rc))
1189 {
1190 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1191 if (RT_SUCCESS(rc))
1192 {
1193 /*
1194 * At last, set the attached flag.
1195 */
1196 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1197 *prcAttach = VINF_SUCCESS;
1198 return VINF_SUCCESS;
1199 }
1200
1201 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1202 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1203 }
1204 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1205 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1206 }
1207 }
1208 else
1209 rc = VERR_NO_MEMORY;
1210
1211 *prcAttach = rc;
1212 return VINF_SUCCESS;
1213}
1214
1215
1216/**
1217 * Attaches a debugger to the specified VM.
1218 *
1219 * Only one debugger at a time.
1220 *
1221 * @returns VBox status code.
1222 * @param pUVM The user mode VM handle.
1223 */
1224VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1225{
1226 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1227 PVM pVM = pUVM->pVM;
1228 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1229
1230 /*
1231 * Call the VM, use EMT rendezvous for serialization.
1232 */
1233 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1234 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1235 if (RT_SUCCESS(rc))
1236 rc = rcAttach;
1237
1238 return rc;
1239}
1240
1241
1242/**
1243 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1244 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1245 */
1246static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1247{
1248 if (pVCpu->idCpu == 0)
1249 {
1250 PUVM pUVM = (PUVM)pvUser;
1251
1252 /*
1253 * Per-CPU cleanup.
1254 */
1255 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1256 {
1257 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1258
1259 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1260 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1261 }
1262
1263 /*
1264 * De-init of the VM -> Debugger communication part living in the global VM structure.
1265 */
1266 if (pUVM->dbgf.s.paDbgEvts)
1267 {
1268 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1269 pUVM->dbgf.s.paDbgEvts = NULL;
1270 }
1271
1272 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1273 {
1274 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1275 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1276 }
1277
1278 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1279 {
1280 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1281 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1282 }
1283
1284 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1285 {
1286 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1287 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1288 }
1289
1290 pUVM->dbgf.s.cDbgEvtMax = 0;
1291 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1292 pUVM->dbgf.s.idxDbgEvtRead = 0;
1293 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1294 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1295 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1296
1297 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1298 }
1299
1300 return VINF_SUCCESS;
1301}
1302
1303
1304/**
1305 * Detaches a debugger from the specified VM.
1306 *
1307 * Caller must be attached to the VM.
1308 *
1309 * @returns VBox status code.
1310 * @param pUVM The user mode VM handle.
1311 */
1312VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1313{
1314 LogFlow(("DBGFR3Detach:\n"));
1315
1316 /*
1317 * Validate input. The UVM handle shall be valid, the VM handle might be
1318 * in the processes of being destroyed already, so deal quietly with that.
1319 */
1320 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1321 PVM pVM = pUVM->pVM;
1322 if (!VM_IS_VALID_EXT(pVM))
1323 return VERR_INVALID_VM_HANDLE;
1324
1325 /*
1326 * Check if attached.
1327 */
1328 if (!pVM->dbgf.s.fAttached)
1329 return VERR_DBGF_NOT_ATTACHED;
1330
1331 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1332}
1333
1334
1335/**
1336 * Wait for a debug event.
1337 *
1338 * @returns VBox status code. Will not return VBOX_INTERRUPTED.
1339 * @param pUVM The user mode VM handle.
1340 * @param cMillies Number of millis to wait.
1341 * @param pEvent Where to store the event data.
1342 */
1343VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
1344{
1345 /*
1346 * Check state.
1347 */
1348 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1349 PVM pVM = pUVM->pVM;
1350 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1351 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1352
1353 RT_BZERO(pEvent, sizeof(*pEvent));
1354
1355 /*
1356 * Wait for an event to arrive if there are none.
1357 */
1358 int rc = VINF_SUCCESS;
1359 uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
1360 if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
1361 {
1362 do
1363 {
1364 rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
1365 } while ( RT_SUCCESS(rc)
1366 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1367 }
1368
1369 if (RT_SUCCESS(rc))
1370 {
1371 Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1372
1373 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
1374 memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
1375 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
1376 }
1377
1378 Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
1379 return rc;
1380}
1381
1382
1383/**
1384 * Halts VM execution.
1385 *
1386 * After calling this the VM isn't actually halted till an DBGFEVENT_HALT_DONE
1387 * arrives. Until that time it's not possible to issue any new commands.
1388 *
1389 * @returns VBox status code.
1390 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1391 * are halted.
1392 * @param pUVM The user mode VM handle.
1393 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
1394 */
1395VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1396{
1397 /*
1398 * Check state.
1399 */
1400 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1401 PVM pVM = pUVM->pVM;
1402 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1403 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1404 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1405
1406 /*
1407 * Halt the requested CPUs as needed.
1408 */
1409 int rc;
1410 if (idCpu != VMCPUID_ALL)
1411 {
1412 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1413 if (!dbgfR3CpuIsHalted(pUVCpu))
1414 {
1415 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1416 rc = VINF_SUCCESS;
1417 }
1418 else
1419 rc = VWRN_DBGF_ALREADY_HALTED;
1420 }
1421 else
1422 {
1423 rc = VWRN_DBGF_ALREADY_HALTED;
1424 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1425 {
1426 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1427 if (!dbgfR3CpuIsHalted(pUVCpu))
1428 {
1429 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1430 rc = VINF_SUCCESS;
1431 }
1432 }
1433 }
1434
1435 return rc;
1436}
1437
1438
1439/**
1440 * Checks if any of the specified vCPUs have been halted by the debugger.
1441 *
1442 * @returns True if at least one halted vCPUs.
1443 * @returns False if no halted vCPUs.
1444 * @param pUVM The user mode VM handle.
1445 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1446 * at least a single vCPU is halted in the debugger.
1447 */
1448VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1449{
1450 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1451 PVM pVM = pUVM->pVM;
1452 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1453 AssertReturn(pVM->dbgf.s.fAttached, false);
1454
1455 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1456}
1457
1458
1459/**
1460 * Checks if the debugger can wait for events or not.
1461 *
1462 * This function is only used by lazy, multiplexing debuggers. :-)
1463 *
1464 * @returns VBox status code.
1465 * @retval VINF_SUCCESS if waitable.
1466 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1467 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1468 * (not asserted) or if the handle is invalid (asserted).
1469 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1470 *
1471 * @param pUVM The user mode VM handle.
1472 */
1473VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1474{
1475 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1476
1477 /* Note! There is a slight race here, unfortunately. */
1478 PVM pVM = pUVM->pVM;
1479 if (!RT_VALID_PTR(pVM))
1480 return VERR_INVALID_VM_HANDLE;
1481 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1482 return VERR_INVALID_VM_HANDLE;
1483 if (!pVM->dbgf.s.fAttached)
1484 return VERR_DBGF_NOT_ATTACHED;
1485
1486 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1487 return VINF_SUCCESS;
1488}
1489
1490
1491/**
1492 * Resumes VM execution.
1493 *
1494 * There is no receipt event on this command.
1495 *
1496 * @returns VBox status code.
1497 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1498 * @param pUVM The user mode VM handle.
1499 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1500 */
1501VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1502{
1503 /*
1504 * Validate input and attachment state.
1505 */
1506 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1507 PVM pVM = pUVM->pVM;
1508 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1509 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1510
1511 /*
1512 * Ping the halted emulation threads, telling them to run.
1513 */
1514 int rc = VWRN_DBGF_ALREADY_RUNNING;
1515 if (idCpu != VMCPUID_ALL)
1516 {
1517 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1518 if (dbgfR3CpuIsHalted(pUVCpu))
1519 {
1520 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1521 AssertRC(rc);
1522 }
1523 }
1524 else
1525 {
1526 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1527 {
1528 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1529 if (dbgfR3CpuIsHalted(pUVCpu))
1530 {
1531 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1532 AssertRC(rc2);
1533 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1534 rc = rc2;
1535 }
1536 }
1537 }
1538
1539 return rc;
1540}
1541
1542
1543/**
1544 * Classifies the current instruction.
1545 *
1546 * @returns Type of instruction.
1547 * @param pVM The cross context VM structure.
1548 * @param pVCpu The current CPU.
1549 * @thread EMT(pVCpu)
1550 */
1551static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1552{
1553 /*
1554 * Read the instruction.
1555 */
1556 size_t cbRead = 0;
1557 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1558 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1559 if (RT_SUCCESS(rc))
1560 {
1561 /*
1562 * Do minimal parsing. No real need to involve the disassembler here.
1563 */
1564 uint8_t *pb = abOpcode;
1565 for (;;)
1566 {
1567 switch (*pb++)
1568 {
1569 default:
1570 return DBGFSTEPINSTRTYPE_OTHER;
1571
1572 case 0xe8: /* call rel16/32 */
1573 case 0x9a: /* call farptr */
1574 case 0xcc: /* int3 */
1575 case 0xcd: /* int xx */
1576 // case 0xce: /* into */
1577 return DBGFSTEPINSTRTYPE_CALL;
1578
1579 case 0xc2: /* ret xx */
1580 case 0xc3: /* ret */
1581 case 0xca: /* retf xx */
1582 case 0xcb: /* retf */
1583 case 0xcf: /* iret */
1584 return DBGFSTEPINSTRTYPE_RET;
1585
1586 case 0xff:
1587 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1588 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1589 return DBGFSTEPINSTRTYPE_CALL;
1590 return DBGFSTEPINSTRTYPE_OTHER;
1591
1592 case 0x0f:
1593 switch (*pb++)
1594 {
1595 case 0x05: /* syscall */
1596 case 0x34: /* sysenter */
1597 return DBGFSTEPINSTRTYPE_CALL;
1598 case 0x07: /* sysret */
1599 case 0x35: /* sysexit */
1600 return DBGFSTEPINSTRTYPE_RET;
1601 }
1602 break;
1603
1604 /* Must handle some REX prefixes. So we do all normal prefixes. */
1605 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1606 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1607 if (!CPUMIsGuestIn64BitCode(pVCpu))
1608 return DBGFSTEPINSTRTYPE_OTHER;
1609 break;
1610
1611 case 0x2e: /* CS */
1612 case 0x36: /* SS */
1613 case 0x3e: /* DS */
1614 case 0x26: /* ES */
1615 case 0x64: /* FS */
1616 case 0x65: /* GS */
1617 case 0x66: /* op size */
1618 case 0x67: /* addr size */
1619 case 0xf0: /* lock */
1620 case 0xf2: /* REPNZ */
1621 case 0xf3: /* REPZ */
1622 break;
1623 }
1624 }
1625 }
1626
1627 return DBGFSTEPINSTRTYPE_INVALID;
1628}
1629
1630
1631/**
1632 * Checks if the stepping has reached a stop point.
1633 *
1634 * Called when raising a stepped event.
1635 *
1636 * @returns true if the event should be raised, false if we should take one more
1637 * step first.
1638 * @param pVM The cross context VM structure.
1639 * @param pVCpu The cross context per CPU structure of the calling EMT.
1640 * @thread EMT(pVCpu)
1641 */
1642static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
1643{
1644 /*
1645 * Check valid pVCpu and that it matches the CPU one stepping.
1646 */
1647 if (pVCpu)
1648 {
1649 if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
1650 {
1651 /*
1652 * Increase the number of steps and see if we've reached the max.
1653 */
1654 pVM->dbgf.s.SteppingFilter.cSteps++;
1655 if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
1656 {
1657 /*
1658 * Check PC and SP address filtering.
1659 */
1660 if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
1661 {
1662 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1663 && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
1664 return true;
1665 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1666 && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
1667 < pVM->dbgf.s.SteppingFilter.cbStackPop)
1668 return true;
1669 }
1670
1671 /*
1672 * Do step-over filtering separate from the step-into one.
1673 */
1674 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1675 {
1676 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1677 switch (enmType)
1678 {
1679 default:
1680 if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
1681 || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
1682 break;
1683 return true;
1684 case DBGFSTEPINSTRTYPE_CALL:
1685 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1686 && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1687 return true;
1688 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1689 break;
1690 case DBGFSTEPINSTRTYPE_RET:
1691 if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1692 {
1693 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1694 return true;
1695 /* If after return, we use the cMaxStep limit to stop the next time. */
1696 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1697 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1698 }
1699 else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
1700 pVM->dbgf.s.SteppingFilter.uCallDepth--;
1701 break;
1702 }
1703 return false;
1704 }
1705 /*
1706 * Filtered step-into.
1707 */
1708 else if ( pVM->dbgf.s.SteppingFilter.fFlags
1709 & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
1710 {
1711 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1712 switch (enmType)
1713 {
1714 default:
1715 break;
1716 case DBGFSTEPINSTRTYPE_CALL:
1717 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1718 return true;
1719 break;
1720 case DBGFSTEPINSTRTYPE_RET:
1721 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1722 return true;
1723 /* If after return, we use the cMaxStep limit to stop the next time. */
1724 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1725 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1726 break;
1727 }
1728 return false;
1729 }
1730 }
1731 }
1732 }
1733
1734 return true;
1735}
1736
1737
1738/**
1739 * Step Into.
1740 *
1741 * A single step event is generated from this command.
1742 * The current implementation is not reliable, so don't rely on the event coming.
1743 *
1744 * @returns VBox status code.
1745 * @param pUVM The user mode VM handle.
1746 * @param idCpu The ID of the CPU to single step on.
1747 */
1748VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1749{
1750 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1751}
1752
1753
1754/**
1755 * Full fleged step.
1756 *
1757 * This extended stepping API allows for doing multiple steps before raising an
1758 * event, helping implementing step over, step out and other more advanced
1759 * features.
1760 *
1761 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
1762 * DBGFEVENT_STEPPED_EVENT. However the stepping may be interrupted by other
1763 * events, which will abort the stepping.
1764 *
1765 * The stop on pop area feature is for safeguarding step out.
1766 *
1767 * Please note though, that it will always use stepping and never breakpoints.
1768 * While this allows for a much greater flexibility it can at times be rather
1769 * slow.
1770 *
1771 * @returns VBox status code.
1772 * @param pUVM The user mode VM handle.
1773 * @param idCpu The ID of the CPU to single step on.
1774 * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
1775 * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
1776 * always be specified.
1777 * @param pStopPcAddr Address to stop executing at. Completely ignored
1778 * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
1779 * @param pStopPopAddr Stack address that SP must be lower than when
1780 * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
1781 * @param cbStopPop The range starting at @a pStopPopAddr which is
1782 * considered to be within the same thread stack. Note
1783 * that the API allows @a pStopPopAddr and @a cbStopPop
1784 * to form an area that wraps around and it will
1785 * consider the part starting at 0 as included.
1786 * @param cMaxSteps The maximum number of steps to take. This is to
1787 * prevent stepping for ever, so passing UINT32_MAX is
1788 * not recommended.
1789 *
1790 * @remarks The two address arguments must be guest context virtual addresses,
1791 * or HMA. The code doesn't make much of a point of out HMA, though.
1792 */
1793VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
1794 PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
1795{
1796 /*
1797 * Check state.
1798 */
1799 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1800 PVM pVM = pUVM->pVM;
1801 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1802 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
1803 AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
1804 AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
1805 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1806 {
1807 AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
1808 AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
1809 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
1810 }
1811 AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1812 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1813 {
1814 AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1815 AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
1816 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
1817 AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
1818 }
1819
1820 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1821 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1822 if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
1823 { /* likely */ }
1824 else
1825 return VERR_SEM_OUT_OF_TURN;
1826 Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
1827
1828 /*
1829 * Send the emulation thread a single-step command.
1830 */
1831 if (fFlags == DBGF_STEP_F_INTO)
1832 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1833 else
1834 pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
1835 pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
1836 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1837 pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
1838 else
1839 pVM->dbgf.s.SteppingFilter.AddrPc = 0;
1840 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1841 {
1842 pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
1843 pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
1844 }
1845 else
1846 {
1847 pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
1848 pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
1849 }
1850
1851 pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
1852 pVM->dbgf.s.SteppingFilter.cSteps = 0;
1853 pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
1854
1855 Assert(dbgfR3CpuIsHalted(pUVCpu));
1856 return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
1857}
1858
1859
1860
1861/**
1862 * dbgfR3EventConfigEx argument packet.
1863 */
1864typedef struct DBGFR3EVENTCONFIGEXARGS
1865{
1866 PCDBGFEVENTCONFIG paConfigs;
1867 size_t cConfigs;
1868 int rc;
1869} DBGFR3EVENTCONFIGEXARGS;
1870/** Pointer to a dbgfR3EventConfigEx argument packet. */
1871typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1872
1873
1874/**
1875 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1876 */
1877static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1878{
1879 if (pVCpu->idCpu == 0)
1880 {
1881 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1882 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1883 size_t cConfigs = pArgs->cConfigs;
1884
1885 /*
1886 * Apply the changes.
1887 */
1888 unsigned cChanges = 0;
1889 for (uint32_t i = 0; i < cConfigs; i++)
1890 {
1891 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1892 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1893 if (paConfigs[i].fEnabled)
1894 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1895 else
1896 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1897 }
1898
1899 /*
1900 * Inform HM about changes.
1901 */
1902 if (cChanges > 0 && HMIsEnabled(pVM))
1903 {
1904 HMR3NotifyDebugEventChanged(pVM);
1905 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1906 }
1907 }
1908 else if (HMIsEnabled(pVM))
1909 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1910
1911 return VINF_SUCCESS;
1912}
1913
1914
1915/**
1916 * Configures (enables/disables) multiple selectable debug events.
1917 *
1918 * @returns VBox status code.
1919 * @param pUVM The user mode VM handle.
1920 * @param paConfigs The event to configure and their new state.
1921 * @param cConfigs Number of entries in @a paConfigs.
1922 */
1923VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1924{
1925 /*
1926 * Validate input.
1927 */
1928 size_t i = cConfigs;
1929 while (i-- > 0)
1930 {
1931 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1932 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1933 }
1934 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1935 PVM pVM = pUVM->pVM;
1936 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1937
1938 /*
1939 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1940 * can sync their data and execution with new debug state.
1941 */
1942 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1943 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1944 dbgfR3EventConfigEx, &Args);
1945 if (RT_SUCCESS(rc))
1946 rc = Args.rc;
1947 return rc;
1948}
1949
1950
1951/**
1952 * Enables or disables a selectable debug event.
1953 *
1954 * @returns VBox status code.
1955 * @param pUVM The user mode VM handle.
1956 * @param enmEvent The selectable debug event.
1957 * @param fEnabled The new state.
1958 */
1959VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
1960{
1961 /*
1962 * Convert to an array call.
1963 */
1964 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
1965 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
1966}
1967
1968
1969/**
1970 * Checks if the given selectable event is enabled.
1971 *
1972 * @returns true if enabled, false if not or invalid input.
1973 * @param pUVM The user mode VM handle.
1974 * @param enmEvent The selectable debug event.
1975 * @sa DBGFR3EventQuery
1976 */
1977VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
1978{
1979 /*
1980 * Validate input.
1981 */
1982 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
1983 && enmEvent < DBGFEVENT_END, false);
1984 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
1985 || enmEvent == DBGFEVENT_BREAKPOINT
1986 || enmEvent == DBGFEVENT_BREAKPOINT_IO
1987 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
1988
1989 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1990 PVM pVM = pUVM->pVM;
1991 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1992
1993 /*
1994 * Check the event status.
1995 */
1996 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
1997}
1998
1999
2000/**
2001 * Queries the status of a set of events.
2002 *
2003 * @returns VBox status code.
2004 * @param pUVM The user mode VM handle.
2005 * @param paConfigs The events to query and where to return the state.
2006 * @param cConfigs The number of elements in @a paConfigs.
2007 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
2008 */
2009VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2010{
2011 /*
2012 * Validate input.
2013 */
2014 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2015 PVM pVM = pUVM->pVM;
2016 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2017
2018 for (size_t i = 0; i < cConfigs; i++)
2019 {
2020 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2021 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2022 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2023 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2024 || enmType == DBGFEVENT_BREAKPOINT
2025 || enmType == DBGFEVENT_BREAKPOINT_IO
2026 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2027 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2028 }
2029
2030 return VINF_SUCCESS;
2031}
2032
2033
/**
 * dbgfR3InterruptConfigEx argument packet.
 */
typedef struct DBGFR3INTERRUPTCONFIGEXARGS
{
    /** The interrupt interception configuration entries to apply. */
    PCDBGFINTERRUPTCONFIG paConfigs;
    /** Number of entries in paConfigs. */
    size_t cConfigs;
    /** Result code for the caller.  Initialized to VINF_SUCCESS by
     * DBGFR3InterruptConfigEx; the rendezvous worker does not currently
     * update it. */
    int rc;
} DBGFR3INTERRUPTCONFIGEXARGS;
/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2045
2046/**
2047 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2048 * Worker for DBGFR3InterruptConfigEx.}
2049 */
2050static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2051{
2052 if (pVCpu->idCpu == 0)
2053 {
2054 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2055 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2056 size_t cConfigs = pArgs->cConfigs;
2057
2058 /*
2059 * Apply the changes.
2060 */
2061 bool fChanged = false;
2062 bool fThis;
2063 for (uint32_t i = 0; i < cConfigs; i++)
2064 {
2065 /*
2066 * Hardware interrupts.
2067 */
2068 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2069 {
2070 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2071 if (fThis)
2072 {
2073 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2074 pVM->dbgf.s.cHardIntBreakpoints++;
2075 }
2076 }
2077 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2078 {
2079 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2080 if (fThis)
2081 {
2082 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2083 pVM->dbgf.s.cHardIntBreakpoints--;
2084 }
2085 }
2086
2087 /*
2088 * Software interrupts.
2089 */
2090 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2091 {
2092 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2093 if (fThis)
2094 {
2095 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2096 pVM->dbgf.s.cSoftIntBreakpoints++;
2097 }
2098 }
2099 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2100 {
2101 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2102 if (fThis)
2103 {
2104 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2105 pVM->dbgf.s.cSoftIntBreakpoints--;
2106 }
2107 }
2108 }
2109
2110 /*
2111 * Update the event bitmap entries.
2112 */
2113 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2114 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2115 else
2116 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2117
2118 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2119 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2120 else
2121 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2122
2123 /*
2124 * Inform HM about changes.
2125 */
2126 if (fChanged && HMIsEnabled(pVM))
2127 {
2128 HMR3NotifyDebugEventChanged(pVM);
2129 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2130 }
2131 }
2132 else if (HMIsEnabled(pVM))
2133 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2134
2135 return VINF_SUCCESS;
2136}
2137
2138
2139/**
2140 * Changes
2141 *
2142 * @returns VBox status code.
2143 * @param pUVM The user mode VM handle.
2144 * @param paConfigs The events to query and where to return the state.
2145 * @param cConfigs The number of elements in @a paConfigs.
2146 * @sa DBGFR3InterruptConfigHardware, DBGFR3InterruptConfigSoftware
2147 */
2148VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2149{
2150 /*
2151 * Validate input.
2152 */
2153 size_t i = cConfigs;
2154 while (i-- > 0)
2155 {
2156 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2157 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2158 }
2159
2160 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2161 PVM pVM = pUVM->pVM;
2162 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2163
2164 /*
2165 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2166 * can sync their data and execution with new debug state.
2167 */
2168 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2169 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2170 dbgfR3InterruptConfigEx, &Args);
2171 if (RT_SUCCESS(rc))
2172 rc = Args.rc;
2173 return rc;
2174}
2175
2176
2177/**
2178 * Configures interception of a hardware interrupt.
2179 *
2180 * @returns VBox status code.
2181 * @param pUVM The user mode VM handle.
2182 * @param iInterrupt The interrupt number.
2183 * @param fEnabled Whether interception is enabled or not.
2184 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2185 */
2186VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2187{
2188 /*
2189 * Convert to DBGFR3InterruptConfigEx call.
2190 */
2191 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2192 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2193}
2194
2195
2196/**
2197 * Configures interception of a software interrupt.
2198 *
2199 * @returns VBox status code.
2200 * @param pUVM The user mode VM handle.
2201 * @param iInterrupt The interrupt number.
2202 * @param fEnabled Whether interception is enabled or not.
2203 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2204 */
2205VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2206{
2207 /*
2208 * Convert to DBGFR3InterruptConfigEx call.
2209 */
2210 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2211 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2212}
2213
2214
2215/**
2216 * Checks whether interception is enabled for a hardware interrupt.
2217 *
2218 * @returns true if enabled, false if not or invalid input.
2219 * @param pUVM The user mode VM handle.
2220 * @param iInterrupt The interrupt number.
2221 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2222 * DBGF_IS_SOFTWARE_INT_ENABLED
2223 */
2224VMMR3DECL(int) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2225{
2226 /*
2227 * Validate input.
2228 */
2229 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2230 PVM pVM = pUVM->pVM;
2231 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2232
2233 /*
2234 * Check it.
2235 */
2236 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2237}
2238
2239
2240/**
2241 * Checks whether interception is enabled for a software interrupt.
2242 *
2243 * @returns true if enabled, false if not or invalid input.
2244 * @param pUVM The user mode VM handle.
2245 * @param iInterrupt The interrupt number.
2246 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2247 * DBGF_IS_HARDWARE_INT_ENABLED,
2248 */
2249VMMR3DECL(int) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2250{
2251 /*
2252 * Validate input.
2253 */
2254 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2255 PVM pVM = pUVM->pVM;
2256 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2257
2258 /*
2259 * Check it.
2260 */
2261 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2262}
2263
2264
2265
2266/**
2267 * Call this to single step programmatically.
2268 *
2269 * You must pass down the return code to the EM loop! That's
2270 * where the actual single stepping take place (at least in the
2271 * current implementation).
2272 *
2273 * @returns VINF_EM_DBG_STEP
2274 *
2275 * @param pVCpu The cross context virtual CPU structure.
2276 *
2277 * @thread VCpu EMT
2278 * @internal
2279 */
2280VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2281{
2282 VMCPU_ASSERT_EMT(pVCpu);
2283
2284 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2285 return VINF_EM_DBG_STEP;
2286}
2287
2288
2289/**
2290 * Inject an NMI into a running VM (only VCPU 0!)
2291 *
2292 * @returns VBox status code.
2293 * @param pUVM The user mode VM structure.
2294 * @param idCpu The ID of the CPU to inject the NMI on.
2295 */
2296VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2297{
2298 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2299 PVM pVM = pUVM->pVM;
2300 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2301 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2302
2303 /** @todo Implement generic NMI injection. */
2304 /** @todo NEM: NMI injection */
2305 if (!HMIsEnabled(pVM))
2306 return VERR_NOT_SUP_BY_NEM;
2307
2308 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2309 return VINF_SUCCESS;
2310}
2311
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette